Dataset columns:
repo_name: stringclasses (6 values)
pr_number: int64 (99 to 20.3k)
pr_title: stringlengths (8 to 158)
pr_description: stringlengths (0 to 6.54k)
author: stringlengths (4 to 18)
date_created: unknown
date_merged: unknown
previous_commit: stringlengths (40 to 40)
pr_commit: stringlengths (40 to 40)
query: stringlengths (37 to 6.57k)
filepath: stringlengths (8 to 153)
before_content: stringlengths (0 to 876M)
after_content: stringlengths (0 to 876M)
label: int64 (-1 to 1)
repo_name: huggingface/transformers
pr_number: 20,211
pr_title: prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
pr_description: # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
author: ArthurZucker
date_created: "2022-11-14T15:30:34Z"
date_merged: "2023-03-01T09:49:22Z"
previous_commit: b29e2dcaff114762e65eaea739ba1076fc5d1c84
pr_commit: 44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
filepath: ./src/transformers/models/big_bird/modeling_big_bird.py
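For context, the migration described in `pr_description`/`query` above can be sketched as follows. This is a minimal illustration with made-up variable names and values, not an excerpt from the PR diff; it only shows the deprecated tensor floor-division pattern and its `torch.div` replacement.

```python
import torch

# Illustrative values (hypothetical, not from the PR diff).
positions = torch.arange(12)
block_size = 4

# Deprecated pattern: integer tensor division with `//` emitted the
# "__floordiv__ is deprecated" warning on the affected PyTorch versions.
blocks_old = positions // block_size

# Replacement: explicit torch.div with a floor rounding mode.
blocks_new = torch.div(positions, block_size, rounding_mode="floor")

assert torch.equal(blocks_old, blocks_new)
```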
# coding=utf-8 # Copyright 2021 Google Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch BigBird model.""" import math import os from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_big_bird import BigBirdConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "google/bigbird-roberta-base" _CONFIG_FOR_DOC = "BigBirdConfig" BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST = [ "google/bigbird-roberta-base", "google/bigbird-roberta-large", "google/bigbird-base-trivia-itc", # See all BigBird models at https://huggingface.co/models?filter=big_bird ] _TRIVIA_QA_MAPPING = { "big_bird_attention": "attention/self", "output_layer_norm": "output/LayerNorm", "attention_output": "attention/output/dense", "output": "output/dense", "self_attention_layer_norm": "attention/output/LayerNorm", "intermediate": "intermediate/dense", "word_embeddings": "bert/embeddings/word_embeddings", "position_embedding": "bert/embeddings/position_embeddings", "type_embeddings": "bert/embeddings/token_type_embeddings", "embeddings": "bert/embeddings", "layer_normalization": "output/LayerNorm", "layer_norm": "LayerNorm", "trivia_qa_head": "qa_classifier", "dense": "intermediate/dense", "dense_1": "qa_outputs", } def load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=False): """Load tf checkpoints in a pytorch model.""" def load_tf_weights_bert(init_vars, tf_path): names = [] tf_weights = {} for name, shape in init_vars: array = tf.train.load_variable(tf_path, name) name = name.replace("bert/encoder/LayerNorm", "bert/embeddings/LayerNorm") logger.info(f"Loading TF weight {name} with shape {shape}") names.append(name) tf_weights[name] = array return names, tf_weights def load_tf_weights_trivia_qa(init_vars): names = [] tf_weights = {} for i, var in enumerate(init_vars): name_items = var.name.split("/") if "transformer_scaffold" in name_items[0]: layer_name_items = name_items[0].split("_") if len(layer_name_items) < 3: layer_name_items += [0] name_items[0] = f"bert/encoder/layer_{layer_name_items[2]}" name = "/".join([_TRIVIA_QA_MAPPING[x] if x in _TRIVIA_QA_MAPPING else x for x in name_items])[ :-2 ] # remove last :0 in variable if "self/attention/output" in name: name = name.replace("self/attention/output", "output") 
if i >= len(init_vars) - 2: name = name.replace("intermediate", "output") logger.info(f"Loading TF weight {name} with shape {var.shape}") array = var.value().numpy() names.append(name) tf_weights[name] = array return names, tf_weights try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." ) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.saved_model.load(tf_path).variables if is_trivia_qa else tf.train.list_variables(tf_path) if len(init_vars) <= 0: raise ValueError("Loaded trained variables cannot be empty.") pt_names = list(model.state_dict().keys()) if is_trivia_qa: names, tf_weights = load_tf_weights_trivia_qa(init_vars) else: names, tf_weights = load_tf_weights_bert(init_vars, tf_path) for txt_name in names: array = tf_weights[txt_name] name = txt_name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any( n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] for n in name ): logger.info(f"Skipping {'/'.join(name)}") continue pointer = model pt_name = [] for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == "gamma": pointer = getattr(pointer, "weight") pt_name.append("weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") pt_name.append("bias") elif scope_names[0] == "output_weights": pointer = getattr(pointer, "weight") pt_name.append("weight") elif scope_names[0] == "squad": pointer = getattr(pointer, "classifier") pt_name.append("classifier") elif scope_names[0] == "transform": pointer = getattr(pointer, "transform") pt_name.append("transform") if ("bias" in name) or ("kernel" in name): pointer = getattr(pointer, "dense") pt_name.append("dense") elif ("beta" in name) or ("gamma" in name): pointer = getattr(pointer, "LayerNorm") pt_name.append("LayerNorm") else: try: pointer = getattr(pointer, scope_names[0]) pt_name.append(f"{scope_names[0]}") except AttributeError: logger.info(f"Skipping {m_name}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] pt_name.append(f"{num}") if m_name[-11:] == "_embeddings" or m_name == "embeddings": pointer = getattr(pointer, "weight") pt_name.append("weight") elif m_name == "kernel": array = np.transpose(array) try: if len(array.shape) > len(pointer.shape) and math.prod(array.shape) == math.prod(pointer.shape): # print(txt_name, array.shape) if ( txt_name.endswith("attention/self/key/kernel") or txt_name.endswith("attention/self/query/kernel") or txt_name.endswith("attention/self/value/kernel") ): array = array.transpose(1, 0, 2).reshape(pointer.shape) elif txt_name.endswith("attention/output/dense/kernel"): array = array.transpose(0, 2, 1).reshape(pointer.shape) else: array = array.reshape(pointer.shape) if pointer.shape != array.shape: raise ValueError( f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched of {txt_name}." 
) except AssertionError as e: e.args += (pointer.shape, array.shape) raise pt_weight_name = ".".join(pt_name) logger.info(f"Initialize PyTorch weight {pt_weight_name} from {txt_name}.") pointer.data = torch.from_numpy(array) tf_weights.pop(txt_name, None) pt_names.remove(pt_weight_name) logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.") logger.info(f"Weights not initialized in PyTorch model: {', '.join(pt_names)}.") return model class BigBirdEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__ def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False ) # End copy self.rescale_embeddings = config.rescale_embeddings self.hidden_size = config.hidden_size def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if self.rescale_embeddings: inputs_embeds = inputs_embeds * (self.hidden_size**0.5) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.dropout(embeddings) embeddings = self.LayerNorm(embeddings) return embeddings class BigBirdSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) 
self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BigBirdModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class BigBirdBlockSparseAttention(nn.Module): def __init__(self, config, seed=None): super().__init__() self.max_seqlen = config.max_position_embeddings self.seed = seed if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size {config.hidden_size} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." ) self.num_attention_heads = config.num_attention_heads self.num_random_blocks = config.num_random_blocks self.block_size = config.block_size self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, band_mask=None, from_mask=None, to_mask=None, from_blocked_mask=None, to_blocked_mask=None, output_attentions=None, ): # Currently this `class` can't be used in decoder. 
batch_size, seqlen, _ = hidden_states.size() to_seq_length = from_seq_length = seqlen from_block_size = to_block_size = self.block_size if from_seq_length % from_block_size != 0: raise ValueError("Query sided sequence length must be multiple of block size") if to_seq_length % to_block_size != 0: raise ValueError("Key/Value sided sequence length must be multiple of block size") query_layer = self.transpose_for_scores(self.query(hidden_states)) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) context_layer, attention_probs = self.bigbird_block_sparse_attention( query_layer, key_layer, value_layer, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, self.num_attention_heads, self.num_random_blocks, self.attention_head_size, from_block_size, to_block_size, batch_size, from_seq_length, to_seq_length, seed=self.seed, plan_from_length=None, plan_num_rand_blocks=None, output_attentions=output_attentions, ) context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs @staticmethod def torch_bmm_nd(inp_1, inp_2, ndim=None): """Fast nd matrix multiplication""" # faster replacement of torch.einsum ("bhqk,bhkd->bhqd") return torch.bmm(inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:])).view( inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 1]) ) @staticmethod def torch_bmm_nd_transpose(inp_1, inp_2, ndim=None): """Fast nd matrix multiplication with transpose""" # faster replacement of torch.einsum (bhqd,bhkd->bhqk) return torch.bmm( inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:]).transpose(1, 2) ).view(inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 2])) def bigbird_block_sparse_attention( self, query_layer, key_layer, value_layer, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, n_heads, n_rand_blocks, attention_head_size, from_block_size, to_block_size, batch_size, from_seq_len, to_seq_len, seed, plan_from_length, plan_num_rand_blocks, output_attentions, ): # BigBird block-sparse attention as suggested in paper # ITC: # global tokens: 2 x block_size # window tokens: 3 x block_size # random tokens: num_rand_tokens x block_size # ETC: # global tokens: extra_globals_tokens + 2 x block_size # window tokens: 3 x block_size # random tokens: num_rand_tokens x block_size # Note: # 1) Currently, ETC is not supported. # 2) Window size is fixed to 3 blocks & it can be changed only by # changing `block_size`. # 3) Number of global blocks are fixed (2 blocks here) & global tokens can be # controlled only by `block_size`. # attention is calculated separately for q[0], q[1], q[2:-2], q[-2], q[-1] in order to use special trick of shifting tokens (for calculating sliding attention) # hence following code can be divided into 5 parts. 
if from_seq_len // from_block_size != to_seq_len // to_block_size: raise ValueError("Error the number of blocks needs to be same!") rsqrt_d = 1 / math.sqrt(attention_head_size) bsz = batch_size attn_mask_penalty = -10000.0 # generate random attention and corresponding masks np.random.seed(seed) if from_seq_len in [1024, 3072, 4096]: # old plans used in paper rand_attn = [ self._bigbird_block_rand_mask( self.max_seqlen, self.max_seqlen, from_block_size, to_block_size, n_rand_blocks, last_idx=1024 )[: (from_seq_len // from_block_size - 2)] for _ in range(n_heads) ] else: if plan_from_length is None: plan_from_length, plan_num_rand_blocks = self._get_rand_attn_plan( from_seq_len, from_block_size, n_rand_blocks ) rand_attn = self._bigbird_block_rand_mask_with_head( from_seq_length=from_seq_len, to_seq_length=to_seq_len, from_block_size=from_block_size, to_block_size=to_block_size, num_heads=n_heads, plan_from_length=plan_from_length, plan_num_rand_blocks=plan_num_rand_blocks, ) rand_attn = np.stack(rand_attn, axis=0) rand_attn = torch.tensor(rand_attn, device=query_layer.device, dtype=torch.long) rand_attn.unsqueeze_(0) rand_attn = torch.cat([rand_attn for _ in range(batch_size)], dim=0) rand_mask = self._create_rand_mask_from_inputs( from_blocked_mask, to_blocked_mask, rand_attn, n_heads, n_rand_blocks, bsz, from_seq_len, from_block_size ) blocked_query_matrix = query_layer.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, -1) blocked_key_matrix = key_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1) blocked_value_matrix = value_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1) # preparing block for randn attn gathered_key = self.torch_gather_b2(blocked_key_matrix, rand_attn) gathered_key = gathered_key.view( bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1 ) # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1] gathered_value = self.torch_gather_b2(blocked_value_matrix, rand_attn) gathered_value = gathered_value.view( bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1 ) # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1] # 1st PART # 1st block (global block) attention scores # q[0] x (k[0], k[1], k[2], k[3], k[4] .... 
) # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len] first_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 0], key_layer, ndim=4) first_product = first_product * rsqrt_d first_product += (1.0 - to_mask) * attn_mask_penalty first_attn_weights = nn.functional.softmax( first_product, dim=-1 ) # [bsz, n_heads, from_block_size, to_seq_len] # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1] first_context_layer = self.torch_bmm_nd(first_attn_weights, value_layer, ndim=4) first_context_layer.unsqueeze_(2) # 2nd PART # 2nd block attention scores # q[1] x (sliding_keys, random_keys, global_keys) # sliding key blocks -> 2nd, 3rd blocks # global key blocks -> 1st block second_key_mat = torch.cat( [ blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, 1], blocked_key_matrix[:, :, 2], blocked_key_matrix[:, :, -1], gathered_key[:, :, 0], ], dim=2, ) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] second_value_mat = torch.cat( [ blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, 1], blocked_value_matrix[:, :, 2], blocked_value_matrix[:, :, -1], gathered_value[:, :, 0], ], dim=2, ) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] second_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 1], second_key_mat, ndim=4) second_seq_pad = torch.cat( [ to_mask[:, :, :, : 3 * to_block_size], to_mask[:, :, :, -to_block_size:], to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]), ], dim=3, ) second_rand_pad = torch.cat( [ rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]), rand_mask[:, :, 0], ], dim=3, ) second_product = second_product * rsqrt_d second_product += (1.0 - torch.minimum(second_seq_pad, second_rand_pad)) * attn_mask_penalty second_attn_weights = nn.functional.softmax( second_product, dim=-1 ) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1] second_context_layer = self.torch_bmm_nd(second_attn_weights, second_value_mat, ndim=4) second_context_layer.unsqueeze_(2) # 3rd PART # Middle blocks attention scores # q[-2:2] x (sliding_keys, random_keys, global_keys) # sliding attn is calculated using special trick of shifting tokens as discussed in paper # random keys are generated by taking random indices as per `rand_attn` # global keys -> 1st & last block exp_blocked_key_matrix = torch.cat( [blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2], blocked_key_matrix[:, :, 3:-1]], dim=3 ) # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] exp_blocked_value_matrix = torch.cat( [blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2], blocked_value_matrix[:, :, 3:-1]], dim=3, ) # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] middle_query_matrix = blocked_query_matrix[:, :, 2:-2] # sliding attention scores for q[-2:2] # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [b, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] inner_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, exp_blocked_key_matrix, ndim=5) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, 
from_block_size, 3*to_block_size] inner_band_product = inner_band_product * rsqrt_d # randn attention scores for q[-2:2] # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1] rand_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, gathered_key[:, :, 1:-1], ndim=5) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size] rand_band_product = rand_band_product * rsqrt_d # Including 1st block (since it's global) first_band_product = torch.einsum( "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, 0] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] first_band_product = first_band_product * rsqrt_d # Including last block (since it's global) last_band_product = torch.einsum( "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, -1] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] last_band_product = last_band_product * rsqrt_d # masking padded tokens inner_band_product += (1.0 - band_mask) * attn_mask_penalty first_band_product += (1.0 - to_mask[:, :, :, :to_block_size].unsqueeze(3)) * attn_mask_penalty last_band_product += (1.0 - to_mask[:, :, :, -to_block_size:].unsqueeze(3)) * attn_mask_penalty rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * attn_mask_penalty # completing attention scores matrix for all q[-2:2] band_product = torch.cat( [first_band_product, inner_band_product, rand_band_product, last_band_product], dim=-1 ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size] # safely doing softmax since attention matrix is completed attn_weights = nn.functional.softmax( band_product, dim=-1 ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size] # contribution of sliding keys # [bsz, n_heads, m//from_block_size-4, from_block_size, 3*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] context_layer = self.torch_bmm_nd( attn_weights[:, :, :, :, to_block_size : 4 * to_block_size], exp_blocked_value_matrix, ndim=5 ) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] # adding contribution of random keys # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1] context_layer += self.torch_bmm_nd( attn_weights[:, :, :, :, 4 * to_block_size : -to_block_size], gathered_value[:, :, 1:-1], ndim=5 ) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] # adding contribution of global keys context_layer += torch.einsum( "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, :to_block_size], blocked_value_matrix[:, :, 0] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] context_layer += torch.einsum( "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, -to_block_size:], blocked_value_matrix[:, :, -1] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> 
[bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] # 4th PART # last 2nd token attention scores # q[-2] x (sliding_keys, random_keys, global_keys) # sliding key blocks -> last 3 blocks # global key block -> 1st block # random key block -> based on indices stored in `randn_attn` second_last_key_mat = torch.cat( [ blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, -3], blocked_key_matrix[:, :, -2], blocked_key_matrix[:, :, -1], gathered_key[:, :, -1], ], dim=2, ) # [bsz, n_heads, (4+n_random_blocks)*to_block_size, -1] second_last_value_mat = torch.cat( [ blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, -3], blocked_value_matrix[:, :, -2], blocked_value_matrix[:, :, -1], gathered_value[:, :, -1], ], dim=2, ) # [bsz, n_heads, (4+r)*to_block_size, -1] # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] second_last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -2], second_last_key_mat, ndim=4) second_last_seq_pad = torch.cat( [ to_mask[:, :, :, :to_block_size], to_mask[:, :, :, -3 * to_block_size :], to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]), ], dim=3, ) second_last_rand_pad = torch.cat( [ rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]), rand_mask[:, :, -1], ], dim=3, ) second_last_product = second_last_product * rsqrt_d second_last_product += (1.0 - torch.minimum(second_last_seq_pad, second_last_rand_pad)) * attn_mask_penalty second_last_attn_weights = nn.functional.softmax( second_last_product, dim=-1 ) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1] second_last_context_layer = self.torch_bmm_nd(second_last_attn_weights, second_last_value_mat, ndim=4) second_last_context_layer.unsqueeze_(2) # 5th PART # last block (global) attention scores # q[-1] x (k[0], k[1], k[2], k[3], .... 
) # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len] last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -1], key_layer, ndim=4) last_product = last_product * rsqrt_d last_product += (1.0 - to_mask) * attn_mask_penalty last_attn_weights = nn.functional.softmax(last_product, dim=-1) # [bsz, n_heads, from_block_size, n] # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1] last_context_layer = self.torch_bmm_nd(last_attn_weights, value_layer, ndim=4) last_context_layer.unsqueeze_(2) # combining representations of all tokens context_layer = torch.cat( [first_context_layer, second_context_layer, context_layer, second_last_context_layer, last_context_layer], dim=2, ) context_layer = context_layer.view((bsz, n_heads, from_seq_len, -1)) * from_mask context_layer = torch.transpose(context_layer, 1, 2) # this is just for visualizing; forward pass doesn't depend on following code if output_attentions: # TODO(PVP): need to verify if below code is correct attention_probs = torch.zeros( bsz, n_heads, from_seq_len, to_seq_len, dtype=torch.float, device=context_layer.device ) # 1st query block # corresponding to `first_context_layer` attention_probs[:, :, :from_block_size, :] = first_attn_weights # all keys global # 2nd query block # corresponding to `second_context_layer` attention_probs[:, :, from_block_size : 2 * from_block_size, : 3 * to_block_size] = second_attn_weights[ :, :, :, : 3 * to_block_size ] # 1st three key blocks (global + sliding) attention_probs[:, :, from_block_size : 2 * from_block_size, -to_block_size:] = second_attn_weights[ :, :, :, 3 * to_block_size : 4 * to_block_size ] # last key block (global) # random keys for p1, i1, w1 in zip(range(bsz), rand_attn, second_attn_weights): # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch for p2, i2, w2 in zip(range(n_heads), i1, w1): # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, ) right_slice = w2[:, 4 * to_block_size :] attn_probs_view[p1, p2, 1, :, i2[0]] = right_slice.view( from_block_size, n_rand_blocks, to_block_size ) # Middle query blocks # corresponding to `context_layer` # sliding keys for q_idx in range(from_seq_len // from_block_size - 4): attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, )[:, :, 2:-2, :, 1:-1, :] right_slice = attn_weights[:, :, q_idx, :, to_block_size : 4 * to_block_size] attn_probs_view[:, :, q_idx, :, q_idx : q_idx + 3, :] = right_slice.view( bsz, n_heads, from_block_size, 3, to_block_size ) # inner_band_product # global keys (corresponding to 1st key block) attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, :to_block_size] = attn_weights[ :, :, :, :, :to_block_size ].view( bsz, n_heads, -1, to_block_size ) # first_band_product # global keys (corresponding to last key block) attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, -to_block_size:] = attn_weights[ :, :, :, :, -to_block_size: ].view( bsz, n_heads, -1, to_block_size ) # last_band_product # random keys for p1, i1, w1 in zip(range(bsz), rand_attn, attn_weights): # p1, i1, w1 corresponds to batch_dim i.e. 
following operation is done for each sequence in batch for p2, i2, w2 in zip(range(n_heads), i1, w1): # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads for q_idx in range(1, len(i2) - 1): attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, ) right_slice = w2[q_idx - 1, :, 4 * to_block_size : -to_block_size] attn_probs_view[p1, p2, q_idx + 1, :, i2[q_idx]] = right_slice.view( from_block_size, n_rand_blocks, to_block_size ) # Second-last query block # corresponding to `second_last_context_layer` attention_probs[:, :, -2 * from_block_size : -from_block_size, :to_block_size] = second_last_attn_weights[ :, :, :, :to_block_size ] # 1st key block (global) attention_probs[ :, :, -2 * from_block_size : -from_block_size, -3 * to_block_size : ] = second_last_attn_weights[ :, :, :, to_block_size : 4 * to_block_size ] # last three blocks (global + sliding) # random keys for p1, i1, w1 in zip(range(bsz), rand_attn, second_last_attn_weights): # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch for p2, i2, w2 in zip(range(n_heads), i1, w1): # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, ) right_slice = w2[:, 4 * to_block_size :] attn_probs_view[p1, p2, -2, :, i2[-1]] = right_slice.view( from_block_size, n_rand_blocks, to_block_size ) # last query block # corresponding to `last_context_layer` attention_probs[:, :, -from_block_size:, :] = last_attn_weights # all keys global else: attention_probs = None return context_layer, attention_probs @staticmethod def torch_gather_b2(params, indices): # this operation is equivalent to tf.gather when batch_dims=2 if params.shape[:2] != indices.shape[:2]: raise ValueError( "Make sure that the first two dimensions of params and indices are identical, but" f" they are params: {params.shape[:2]} vs. indices: {indices.shape[:2]}" ) num_indices_to_gather = indices.shape[-2] * indices.shape[-1] num_indices_to_pick_from = params.shape[2] indices_shift = ( torch.arange(indices.shape[0] * indices.shape[1] * num_indices_to_gather, device=indices.device) // num_indices_to_gather * num_indices_to_pick_from ) flattened_indices = indices.view(-1) + indices_shift flattened_params = params.reshape(-1, params.shape[-2], params.shape[-1]) out_flattened = flattened_params.index_select(0, flattened_indices) out = out_flattened.reshape(params.shape[:2] + (num_indices_to_gather,) + params.shape[3:]) return out @staticmethod def _create_rand_mask_from_inputs( from_blocked_mask, to_blocked_mask, rand_attn, num_attention_heads, num_rand_blocks, batch_size, from_seq_length, from_block_size, ): """ Create 3D attention mask from a 2D tensor mask. Args: from_blocked_mask: 2D Tensor of shape [batch_size, from_seq_length//from_block_size, from_block_size]. to_blocked_mask: int32 Tensor of shape [batch_size, to_seq_length//to_block_size, to_block_size]. rand_attn: [batch_size, num_attention_heads, from_seq_length//from_block_size-2, num_rand_blocks] num_attention_heads: int. Number of attention heads. num_rand_blocks: int. Number of random chunks per row. batch_size: int. Batch size for computation. from_seq_length: int. length of from sequence. from_block_size: int. size of block in from sequence. 
Returns: float Tensor of shape [batch_size, num_attention_heads, from_seq_length//from_block_size-2, from_block_size, num_rand_blocks*to_block_size]. """ num_windows = from_seq_length // from_block_size - 2 rand_mask = torch.stack([p1[i1.flatten()] for p1, i1 in zip(to_blocked_mask, rand_attn)]) rand_mask = rand_mask.view(batch_size, num_attention_heads, num_windows, num_rand_blocks * from_block_size) rand_mask = torch.einsum("blq,bhlk->bhlqk", from_blocked_mask[:, 1:-1], rand_mask) return rand_mask @staticmethod def _get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks): """ Gives the plan of where to put random attention. Args: from_seq_length: int. length of from sequence. from_block_size: int. size of block in from sequence. num_rand_blocks: int. Number of random chunks per row. Returns: plan_from_length: ending location of from block plan_num_rand_blocks: number of random ending location for each block """ plan_from_length = [] plan_num_rand_blocks = [] if (2 * num_rand_blocks + 5) < (from_seq_length // from_block_size): plan_from_length.append(int((2 * num_rand_blocks + 5) * from_block_size)) plan_num_rand_blocks.append(num_rand_blocks) plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(0) elif (num_rand_blocks + 5) < (from_seq_length // from_block_size): plan_from_length.append(int((num_rand_blocks + 5) * from_block_size)) plan_num_rand_blocks.append(num_rand_blocks // 2) plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(num_rand_blocks - (num_rand_blocks // 2)) else: plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(num_rand_blocks) return plan_from_length, plan_num_rand_blocks @staticmethod def _bigbird_block_rand_mask( from_seq_length, to_seq_length, from_block_size, to_block_size, num_rand_blocks, last_idx=-1 ): """ Create adjacency list of random attention. Args: from_seq_length: int. length of from sequence. to_seq_length: int. length of to sequence. from_block_size: int. size of block in from sequence. to_block_size: int. size of block in to sequence. num_rand_blocks: int. Number of random chunks per row. last_idx: if -1 then num_rand_blocks blocks chosen anywhere in to sequence, if positive then num_rand_blocks blocks chosen only up to last_idx. 
Returns: adjacency list of size from_seq_length//from_block_size-2 by num_rand_blocks """ # using this method when from_seq_length in [1024, 3072, 4096] if from_seq_length // from_block_size != to_seq_length // to_block_size: raise ValueError("Error the number of blocks needs to be same!") rand_attn = np.zeros((from_seq_length // from_block_size - 2, num_rand_blocks), dtype=np.int32) middle_seq = np.arange(1, to_seq_length // to_block_size - 1, dtype=np.int32) last = to_seq_length // to_block_size - 1 if last_idx > (2 * to_block_size): last = (last_idx // to_block_size) - 1 r = num_rand_blocks # shorthand for i in range(1, from_seq_length // from_block_size - 1): start = i - 2 end = i if i == 1: rand_attn[i - 1, :] = np.random.permutation(middle_seq[2:last])[:r] elif i == 2: rand_attn[i - 1, :] = np.random.permutation(middle_seq[3:last])[:r] elif i == from_seq_length // from_block_size - 3: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r] # Missing -3: should have been sliced till last-3 elif i == from_seq_length // from_block_size - 2: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r] # Missing -4: should have been sliced till last-4 else: if start > last: start = last rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r] elif (end + 1) == last: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r] else: rand_attn[i - 1, :] = np.random.permutation( np.concatenate((middle_seq[:start], middle_seq[end + 1 : last])) )[:r] return rand_attn def _bigbird_block_rand_mask_with_head( self, from_seq_length, to_seq_length, from_block_size, to_block_size, num_heads, plan_from_length, plan_num_rand_blocks, window_block_left=1, window_block_right=1, global_block_top=1, global_block_bottom=1, global_block_left=1, global_block_right=1, ): """ Create adjacency list of random attention. Args: from_seq_length: int. length of from sequence. to_seq_length: int. length of to sequence. from_block_size: int. size of block in from sequence. to_block_size: int. size of block in to sequence. num_heads: int. total number of heads. plan_from_length: list. plan from length where num_random_blocks are chosen from. plan_num_rand_blocks: list. number of rand blocks within the plan. window_block_left: int. number of blocks of window to left of a block. window_block_right: int. number of blocks of window to right of a block. global_block_top: int. number of blocks at the top. global_block_bottom: int. number of blocks at the bottom. global_block_left: int. Number of blocks globally used to the left. global_block_right: int. Number of blocks globally used to the right. 
Returns: adjacency list of size num_head where each element is of size from_seq_length//from_block_size-2 by num_rand_blocks """ # using this method when from_seq_length not in [1024, 3072, 4096] if from_seq_length // from_block_size != to_seq_length // to_block_size: raise ValueError("Error the number of blocks needs to be same!") if from_seq_length not in plan_from_length: raise ValueError("Error from sequence length not in plan!") # Total number of blocks in the mmask num_blocks = from_seq_length // from_block_size # Number of blocks per plan plan_block_length = np.array(plan_from_length) // from_block_size # till when to follow plan max_plan_idx = plan_from_length.index(from_seq_length) # Random Attention adjacency list rand_attn = [ np.zeros((num_blocks, np.sum(plan_num_rand_blocks[: max_plan_idx + 1])), dtype=np.int32) for i in range(num_heads) ] # We will go iteratively over the plan blocks and pick random number of # Attention blocks from the legally allowed blocks for plan_idx in range(max_plan_idx + 1): rnd_r_cnt = 0 if plan_idx > 0: # set the row for all from_blocks starting from 0 to # plan_block_length[plan_idx-1] # column indx start fromm plan_block_length[plan_idx-1] and ends at # plan_block_length[plan_idx] if plan_num_rand_blocks[plan_idx] > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx])) curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1])) for blk_rw_idx in range(global_block_top, plan_block_length[plan_idx - 1]): for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention( block_id=blk_rw_idx, to_start_block_id=plan_block_length[plan_idx - 1], to_end_block_id=plan_block_length[plan_idx], num_rand_blocks=plan_num_rand_blocks[plan_idx], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, ) for pl_id in range(plan_idx): if plan_num_rand_blocks[pl_id] == 0: continue for blk_rw_idx in range(plan_block_length[plan_idx - 1], plan_block_length[plan_idx]): rnd_r_cnt = 0 to_start_block_id = 0 if pl_id > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:pl_id])) to_start_block_id = plan_block_length[pl_id - 1] curr_r_cnt = int(np.sum(plan_num_rand_blocks[: pl_id + 1])) for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention( block_id=blk_rw_idx, to_start_block_id=to_start_block_id, to_end_block_id=plan_block_length[pl_id], num_rand_blocks=plan_num_rand_blocks[pl_id], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, ) if plan_num_rand_blocks[plan_idx] == 0: continue curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1])) from_start_block_id = global_block_top to_start_block_id = 0 if plan_idx > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx])) from_start_block_id = plan_block_length[plan_idx - 1] to_start_block_id = plan_block_length[plan_idx - 1] for blk_rw_idx in range(from_start_block_id, plan_block_length[plan_idx]): for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention( block_id=blk_rw_idx, to_start_block_id=to_start_block_id, to_end_block_id=plan_block_length[plan_idx], num_rand_blocks=plan_num_rand_blocks[plan_idx], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, ) for nh 
in range(num_heads): rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :] return rand_attn @staticmethod def _get_single_block_row_attention( block_id, to_start_block_id, to_end_block_id, num_rand_blocks, window_block_left=1, window_block_right=1, global_block_left=1, global_block_right=1, ): """ For a single row block get random row attention. Args: block_id: int. block id of row. to_start_block_id: int. random attention column start id. to_end_block_id: int. random attention column end id. num_rand_blocks: int. number of random blocks to be selected. window_block_left: int. number of blocks of window to left of a block. window_block_right: int. number of blocks of window to right of a block. global_block_left: int. Number of blocks globally used to the left. global_block_right: int. Number of blocks globally used to the right. Returns: row containing the random attention vector of size num_rand_blocks. """ # list of to_blocks from which to choose random attention to_block_list = np.arange(to_start_block_id, to_end_block_id, dtype=np.int32) # permute the blocks perm_block = np.random.permutation(to_block_list) # illegal blocks for the current block id, using window illegal_blocks = list(range(block_id - window_block_left, block_id + window_block_right + 1)) # Add blocks at the start and at the end illegal_blocks.extend(list(range(global_block_left))) illegal_blocks.extend(list(range(to_end_block_id - global_block_right, to_end_block_id))) # The second from_block cannot choose random attention on second last to_block if block_id == 1: illegal_blocks.append(to_end_block_id - 2) # The second last from_block cannot choose random attention on second to_block if block_id == to_end_block_id - 2: illegal_blocks.append(1) selected_random_blokcs = [] for i in range(to_end_block_id - to_start_block_id): if perm_block[i] not in illegal_blocks: selected_random_blokcs.append(perm_block[i]) if len(selected_random_blokcs) == num_rand_blocks: break return np.array(selected_random_blokcs, dtype=np.int32) # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BigBird class BigBirdSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BigBirdAttention(nn.Module): def __init__(self, config, seed=None): super().__init__() self.attention_type = config.attention_type self.config = config self.seed = seed if self.config.attention_type == "original_full": self.self = BigBirdSelfAttention(config) elif self.config.attention_type == "block_sparse": self.self = BigBirdBlockSparseAttention(config, seed) else: raise ValueError( f"attention_type can either be original_full or block_sparse, but is {self.config.attention_type}" ) self.output = BigBirdSelfOutput(config) def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = value if value == 
"original_full": # copy all weights to new full attention class attn_weights = BigBirdSelfAttention(self.config) else: # copy all weights to new sparse attention class attn_weights = BigBirdBlockSparseAttention(self.config, self.seed) attn_weights.query = self.self.query attn_weights.value = self.self.value attn_weights.key = self.self.key self.self = attn_weights self.attention_type = value if not self.training: self.self.eval() def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, # block_sparse config band_mask=None, from_mask=None, to_mask=None, from_blocked_mask=None, to_blocked_mask=None, ): # fp16 compatibility if band_mask is not None: band_mask = band_mask.to(hidden_states.dtype) if from_mask is not None: from_mask = from_mask.to(hidden_states.dtype) if to_mask is not None: to_mask = to_mask.to(hidden_states.dtype) if self.attention_type == "original_full": self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) else: if encoder_hidden_states is not None: raise ValueError("BigBird cannot be used as a decoder when config.attention_type != 'original_full'") self_outputs = self.self( hidden_states, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, output_attentions ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->BigBird class BigBirdIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->BigBird class BigBirdOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BigBirdLayer(nn.Module): def __init__(self, config, seed=None): super().__init__() self.config = config self.attention_type = config.attention_type self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BigBirdAttention(config, seed=seed) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise TypeError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = BigBirdAttention(config) self.intermediate = BigBirdIntermediate(config) self.output = BigBirdOutput(config) def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( 
f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = value self.attention.set_attention_type(value) if self.add_cross_attention: self.crossattention.set_attention_type(value) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, band_mask=None, from_mask=None, to_mask=None, blocked_encoder_mask=None, past_key_value=None, output_attentions=False, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=self_attn_past_key_value, output_attentions=output_attentions, band_mask=band_mask, from_mask=from_mask, to_mask=to_mask, from_blocked_mask=blocked_encoder_mask, to_blocked_mask=blocked_encoder_mask, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with " " cross-attention layers by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class BigBirdEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.attention_type = config.attention_type self.layer = nn.ModuleList( [BigBirdLayer(config, seed=layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.gradient_checkpointing = False def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = 
value for layer in self.layer: layer.set_attention_type(value) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, band_mask=None, from_mask=None, to_mask=None, blocked_encoder_mask=None, return_dict=True, ) -> Union[BaseModelOutputWithPastAndCrossAttentions, Tuple]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, band_mask, from_mask, to_mask, blocked_encoder_mask, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, band_mask, from_mask, to_mask, blocked_encoder_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->BigBird class BigBirdPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->BigBird class BigBirdLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = BigBirdPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each 
token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->BigBird class BigBirdOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BigBirdLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores # Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->BigBird class BigBirdOnlyNSPHead(nn.Module): def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score # Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->BigBird class BigBirdPreTrainingHeads(nn.Module): def __init__(self, config): super().__init__() self.predictions = BigBirdLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class BigBirdPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BigBirdConfig load_tf_weights = load_tf_weights_in_big_bird base_model_prefix = "bert" supports_gradient_checkpointing = True _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, BigBirdEncoder): module.gradient_checkpointing = value BIG_BIRD_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`BigBirdConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ BIG_BIRD_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. 
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @dataclass class BigBirdForPreTrainingOutput(ModelOutput): """ Output type of [`BigBirdForPreTraining`]. Args: loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
            Attentions weights after the attention softmax, used to compute the weighted average in the
            self-attention heads.
    """

    loss: Optional[torch.FloatTensor] = None
    prediction_logits: torch.FloatTensor = None
    seq_relationship_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


@dataclass
class BigBirdForQuestionAnsweringModelOutput(ModelOutput):
    """
    Base class for outputs of question answering models.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Total span extraction loss is the sum of a Cross-Entropy for the start and end positions.
        start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Span-start scores (before SoftMax).
        end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
            Span-end scores (before SoftMax).
        pooler_output (`torch.FloatTensor` of shape `(batch_size, 1)`):
            pooler output from BigBirdModel
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the
            self-attention heads.
    """

    loss: Optional[torch.FloatTensor] = None
    start_logits: torch.FloatTensor = None
    end_logits: torch.FloatTensor = None
    pooler_output: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


@add_start_docstrings(
    "The bare BigBird Model transformer outputting raw hidden-states without any specific head on top.",
    BIG_BIRD_START_DOCSTRING,
)
class BigBirdModel(BigBirdPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in [Attention is
    all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.

    To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set
    to `True`. To be used in a Seq2Seq model, the model needs to be initialized with both `is_decoder` argument and
    `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass.
""" def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.attention_type = self.config.attention_type self.config = config self.block_size = self.config.block_size self.embeddings = BigBirdEmbeddings(config) self.encoder = BigBirdEncoder(config) if add_pooling_layer: self.pooler = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() else: self.pooler = None self.activation = None if self.attention_type != "original_full" and config.add_cross_attention: logger.warning( "When using `BigBirdForCausalLM` as decoder, then `attention_type` must be `original_full`. Setting" " `attention_type=original_full`" ) self.set_attention_type("original_full") # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = value self.encoder.set_attention_type(value) @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[BaseModelOutputWithPoolingAndCrossAttentions, Tuple[torch.FloatTensor]]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # in order to use block_sparse attention, sequence_length has to be at least # bigger than all global attentions: 2 * block_size # + sliding tokens: 3 * block_size # + random tokens: 2 * num_random_blocks * block_size max_tokens_to_attend = (5 + 2 * self.config.num_random_blocks) * self.config.block_size if self.attention_type == "block_sparse" and seq_length <= max_tokens_to_attend: # change attention_type from block_sparse to original_full sequence_length = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1) logger.warning( "Attention type 'block_sparse' is not possible if sequence_length: " f"{sequence_length} <= num global tokens: 2 * config.block_size " "+ min. num sliding tokens: 3 * config.block_size " "+ config.num_random_blocks * config.block_size " "+ additional buffer: config.num_random_blocks * config.block_size " f"= {max_tokens_to_attend} with config.block_size " f"= {self.config.block_size}, config.num_random_blocks " f"= {self.config.num_random_blocks}. " "Changing attention type to 'original_full'..." 
) self.set_attention_type("original_full") if self.attention_type == "block_sparse": ( padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds, ) = self._pad_to_block_size( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, pad_token_id=self.config.pad_token_id, ) else: padding_len = 0 if self.attention_type == "block_sparse": blocked_encoder_mask, band_mask, from_mask, to_mask = self.create_masks_for_block_sparse_attn( attention_mask, self.block_size ) extended_attention_mask = None elif self.attention_type == "original_full": blocked_encoder_mask = None band_mask = None from_mask = None to_mask = None # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) else: raise ValueError( f"attention_type can either be original_full or block_sparse, but is {self.attention_type}" ) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, band_mask=band_mask, from_mask=from_mask, to_mask=to_mask, blocked_encoder_mask=blocked_encoder_mask, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooler_output = self.activation(self.pooler(sequence_output[:, 0, :])) if (self.pooler is not None) else None # undo padding if padding_len > 0: # unpad `sequence_output` because the calling function is expecting a length == input_ids.size(1) sequence_output = sequence_output[:, :-padding_len] if not return_dict: return (sequence_output, pooler_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooler_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @staticmethod def create_masks_for_block_sparse_attn(attention_mask: 
torch.Tensor, block_size: int): batch_size, seq_length = attention_mask.size() if seq_length % block_size != 0: raise ValueError( f"Sequence length must be multiple of block size, but sequence length is {seq_length}, while block" f" size is {block_size}." ) def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask): """ Create 3D attention mask from a 2D tensor mask. Args: from_blocked_mask: 2D Tensor of shape [batch_size, from_seq_length//from_block_size, from_block_size]. to_blocked_mask: int32 Tensor of shape [batch_size, to_seq_length//to_block_size, to_block_size]. Returns: float Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4, from_block_size, 3*to_block_size]. """ exp_blocked_to_pad = torch.cat( [to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:, 3:-1]], dim=2 ) band_mask = torch.einsum("blq,blk->blqk", from_blocked_mask[:, 2:-2], exp_blocked_to_pad) band_mask.unsqueeze_(1) return band_mask blocked_encoder_mask = attention_mask.view(batch_size, seq_length // block_size, block_size) band_mask = create_band_mask_from_inputs(blocked_encoder_mask, blocked_encoder_mask) from_mask = attention_mask.view(batch_size, 1, seq_length, 1) to_mask = attention_mask.view(batch_size, 1, 1, seq_length) return blocked_encoder_mask, band_mask, from_mask, to_mask def _pad_to_block_size( self, input_ids: torch.Tensor, attention_mask: torch.Tensor, token_type_ids: torch.Tensor, position_ids: torch.Tensor, inputs_embeds: torch.Tensor, pad_token_id: int, ): """A helper function to pad tokens and mask to work with implementation of BigBird block-sparse attention.""" # padding block_size = self.config.block_size input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape batch_size, seq_len = input_shape[:2] padding_len = (block_size - seq_len % block_size) % block_size if padding_len > 0: logger.info( f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of " f"`config.block_size`: {block_size}" ) if input_ids is not None: input_ids = nn.functional.pad(input_ids, (0, padding_len), value=pad_token_id) if position_ids is not None: # pad with position_id = pad_token_id as in modeling_bigbird.BigBirdEmbeddings position_ids = nn.functional.pad(position_ids, (0, padding_len), value=pad_token_id) if inputs_embeds is not None: input_ids_padding = inputs_embeds.new_full( (batch_size, padding_len), self.config.pad_token_id, dtype=torch.long, ) inputs_embeds_padding = self.embeddings(input_ids_padding) inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2) attention_mask = nn.functional.pad( attention_mask, (0, padding_len), value=False ) # no attention on the padding tokens token_type_ids = nn.functional.pad(token_type_ids, (0, padding_len), value=0) # pad with token_type_id = 0 return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds class BigBirdForPreTraining(BigBirdPreTrainedModel): _keys_to_ignore_on_load_missing = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] def __init__(self, config): super().__init__(config) self.bert = BigBirdModel(config, add_pooling_layer=True) self.cls = BigBirdPreTrainingHeads(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, 
sequence_length")) @replace_return_docstrings(output_type=BigBirdForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.FloatTensor] = None, next_sentence_label: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[BigBirdForPreTrainingOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the next sequence prediction (classification) loss. If specified, nsp loss will be added to masked_lm loss. Input should be a sequence pair (see `input_ids` docstring) Indices should be in `[0, 1]`: - 0 indicates sequence B is a continuation of sequence A, - 1 indicates sequence B is a random sequence. kwargs (`Dict[str, any]`, optional, defaults to *{}*): Used to hide legacy arguments that have been deprecated. Returns: Example: ```python >>> from transformers import AutoTokenizer, BigBirdForPreTraining >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-roberta-base") >>> model = BigBirdForPreTraining.from_pretrained("google/bigbird-roberta-base") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.prediction_logits >>> seq_relationship_logits = outputs.seq_relationship_logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output, pooled_output = outputs[:2] prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) total_loss = None if labels is not None: loss_fct = CrossEntropyLoss() total_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if next_sentence_label is not None and total_loss is not None: next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) total_loss = total_loss + next_sentence_loss if not return_dict: output = (prediction_scores, seq_relationship_score) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return BigBirdForPreTrainingOutput( loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings("""BigBird Model with a `language modeling` head on top.""", BIG_BIRD_START_DOCSTRING) class BigBirdForMaskedLM(BigBirdPreTrainedModel): _keys_to_ignore_on_load_missing = ["cls.predictions.decoder.weight", 
"cls.predictions.decoder.bias"] def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning( "If you want to use `BigBirdForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." ) self.bert = BigBirdModel(config) self.cls = BigBirdOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[MaskedLMOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> import torch >>> from transformers import AutoTokenizer, BigBirdForMaskedLM >>> from datasets import load_dataset >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-roberta-base") >>> model = BigBirdForMaskedLM.from_pretrained("google/bigbird-roberta-base") >>> squad_ds = load_dataset("squad_v2", split="train") # doctest: +IGNORE_RESULT >>> # select random long article >>> LONG_ARTICLE_TARGET = squad_ds[81514]["context"] >>> # select random sentence >>> LONG_ARTICLE_TARGET[332:398] 'the highest values are very close to the theoretical maximum value' >>> # add mask_token >>> LONG_ARTICLE_TO_MASK = LONG_ARTICLE_TARGET.replace("maximum", "[MASK]") >>> inputs = tokenizer(LONG_ARTICLE_TO_MASK, return_tensors="pt") >>> # long article input >>> list(inputs["input_ids"].shape) [1, 919] >>> with torch.no_grad(): ... 
logits = model(**inputs).logits >>> # retrieve index of [MASK] >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0] >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1) >>> tokenizer.decode(predicted_token_id) 'maximum' ``` ```python >>> labels = tokenizer(LONG_ARTICLE_TARGET, return_tensors="pt")["input_ids"] >>> labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100) >>> outputs = model(**inputs, labels=labels) >>> round(outputs.loss.item(), 2) 1.08 ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs): input_shape = input_ids.shape effective_batch_size = input_shape[0] # add a dummy token if self.config.pad_token_id is None: raise ValueError("The PAD token should be defined for generation") attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1) dummy_token = torch.full( (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device ) input_ids = torch.cat([input_ids, dummy_token], dim=1) return {"input_ids": input_ids, "attention_mask": attention_mask} @add_start_docstrings( """BigBird Model with a `language modeling` head on top for CLM fine-tuning.""", BIG_BIRD_START_DOCSTRING ) class BigBirdForCausalLM(BigBirdPreTrainedModel): _keys_to_ignore_on_load_missing = [ r"position_ids", r"predictions.decoder.bias", "cls.predictions.decoder.weight", "cls.predictions.decoder.bias", ] def __init__(self, config): super().__init__(config) if not config.is_decoder: logger.warning("If you want to use `BigBirdForCausalLM` as a standalone, add `is_decoder=True.`") self.bert = BigBirdModel(config) self.cls = BigBirdOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, 
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        labels: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[CausalLMOutputWithCrossAttentions, Tuple[torch.FloatTensor]]:
        r"""
        encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.
        past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
            decoding.

            If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
            don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
            `decoder_input_ids` of shape `(batch_size, sequence_length)`.
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
            ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) lm_loss = None if labels is not None: # we are doing next-token prediction; shift prediction scores and input ids by one shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutputWithCrossAttentions( loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs): input_shape = input_ids.shape # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) # cut decoder_input_ids if past is used if past_key_values is not None: input_ids = input_ids[:, -1:] return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values} def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:], ) return reordered_past class BigBirdClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) self.config = config def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = ACT2FN[self.config.hidden_act](x) x = self.dropout(x) x = self.out_proj(x) return x @add_start_docstrings( """ BigBird Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", BIG_BIRD_START_DOCSTRING, ) class BigBirdForSequenceClassification(BigBirdPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.bert = BigBirdModel(config) self.classifier = BigBirdClassificationHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: Example: ```python >>> import torch >>> from transformers import AutoTokenizer, BigBirdForSequenceClassification >>> from datasets import load_dataset >>> tokenizer = AutoTokenizer.from_pretrained("l-yohai/bigbird-roberta-base-mnli") >>> model = BigBirdForSequenceClassification.from_pretrained("l-yohai/bigbird-roberta-base-mnli") >>> squad_ds = load_dataset("squad_v2", split="train") # doctest: +IGNORE_RESULT >>> LONG_ARTICLE = squad_ds[81514]["context"] >>> inputs = tokenizer(LONG_ARTICLE, return_tensors="pt") >>> # long input article >>> list(inputs["input_ids"].shape) [1, 919] >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_class_id = logits.argmax().item() >>> model.config.id2label[predicted_class_id] 'LABEL_0' ``` ```python >>> num_labels = len(model.config.id2label) >>> model = BigBirdForSequenceClassification.from_pretrained( ... "l-yohai/bigbird-roberta-base-mnli", num_labels=num_labels ... 
) >>> labels = torch.tensor(1) >>> loss = model(**inputs, labels=labels).loss >>> round(loss.item(), 2) 1.13 ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ BigBird Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, BIG_BIRD_START_DOCSTRING, ) class BigBirdForMultipleChoice(BigBirdPreTrainedModel): def __init__(self, config): super().__init__(config) self.bert = BigBirdModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward( BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[MultipleChoiceModelOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ BigBird Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, BIG_BIRD_START_DOCSTRING, ) class BigBirdForTokenClassification(BigBirdPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bert = BigBirdModel(config) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[TokenClassifierOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class BigBirdForQuestionAnsweringHead(nn.Module): """Head for question answering tasks.""" def __init__(self, config): super().__init__() self.dropout = nn.Dropout(config.hidden_dropout_prob) self.intermediate = BigBirdIntermediate(config) self.output = BigBirdOutput(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) def forward(self, encoder_output): hidden_states = self.dropout(encoder_output) hidden_states = self.intermediate(hidden_states) hidden_states = self.output(hidden_states, encoder_output) hidden_states = self.qa_outputs(hidden_states) return hidden_states @add_start_docstrings( """ BigBird Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, BIG_BIRD_START_DOCSTRING, ) class BigBirdForQuestionAnswering(BigBirdPreTrainedModel): def __init__(self, config, add_pooling_layer=False): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.sep_token_id = config.sep_token_id self.bert = BigBirdModel(config, add_pooling_layer=add_pooling_layer) self.qa_classifier = BigBirdForQuestionAnsweringHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=BigBirdForQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, question_lengths=None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[BigBirdForQuestionAnsweringModelOutput, Tuple[torch.FloatTensor]]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. 
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. Returns: Example: ```python >>> import torch >>> from transformers import AutoTokenizer, BigBirdForQuestionAnswering >>> from datasets import load_dataset >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-roberta-base") >>> model = BigBirdForQuestionAnswering.from_pretrained("google/bigbird-roberta-base") >>> squad_ds = load_dataset("squad_v2", split="train") # doctest: +IGNORE_RESULT >>> # select random article and question >>> LONG_ARTICLE = squad_ds[81514]["context"] >>> QUESTION = squad_ds[81514]["question"] >>> QUESTION 'During daytime how high can the temperatures reach?' >>> inputs = tokenizer(QUESTION, LONG_ARTICLE, return_tensors="pt") >>> # long article and question input >>> list(inputs["input_ids"].shape) [1, 929] >>> with torch.no_grad(): ... outputs = model(**inputs) >>> answer_start_index = outputs.start_logits.argmax() >>> answer_end_index = outputs.end_logits.argmax() >>> predict_answer_token_ids = inputs.input_ids[0, answer_start_index : answer_end_index + 1] >>> predict_answer_token = tokenizer.decode(predict_answer_token_ids) ``` ```python >>> target_start_index, target_end_index = torch.tensor([130]), torch.tensor([132]) >>> outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index) >>> loss = outputs.loss ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict seqlen = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1) if question_lengths is None and input_ids is not None: # assuming input_ids format: <cls> <question> <sep> context <sep> question_lengths = torch.argmax(input_ids.eq(self.sep_token_id).int(), dim=-1) + 1 question_lengths.unsqueeze_(1) logits_mask = None if question_lengths is not None: # setting lengths logits to `-inf` logits_mask = self.prepare_question_mask(question_lengths, seqlen) if token_type_ids is None: token_type_ids = torch.ones(logits_mask.size(), dtype=int, device=logits_mask.device) - logits_mask logits_mask = logits_mask logits_mask[:, 0] = False logits_mask.unsqueeze_(2) outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_classifier(sequence_output) if logits_mask is not None: # removing question tokens from the competition logits = logits - logits_mask * 1e6 start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 
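        # `total_loss` above averages the start- and end-position cross-entropy losses; below, fall back to a plain
        # tuple when `return_dict` is disabled, otherwise wrap everything in the dedicated QA output class.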
if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return BigBirdForQuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @staticmethod def prepare_question_mask(q_lengths: torch.Tensor, maxlen: int): # q_lengths -> (bz, 1) mask = torch.arange(0, maxlen).to(q_lengths.device) mask.unsqueeze_(0) # -> (1, maxlen) mask = torch.where(mask < q_lengths, 1, 0) return mask
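# Illustrative sketch (added comment, not part of the original file): `prepare_question_mask` marks
# question-token positions with 1 and all other positions with 0. Assuming a single example with a
# question length of 3 and `maxlen=6`, the call below would return `[[1, 1, 1, 0, 0, 0]]`:
#
#   q_lengths = torch.tensor([[3]])
#   BigBirdForQuestionAnswering.prepare_question_mask(q_lengths, maxlen=6)
#   # -> tensor([[1, 1, 1, 0, 0, 0]])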
# coding=utf-8 # Copyright 2021 Google Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch BigBird model.""" import math import os from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPoolingAndCrossAttentions, CausalLMOutputWithCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, torch_int_div from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_big_bird import BigBirdConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "google/bigbird-roberta-base" _CONFIG_FOR_DOC = "BigBirdConfig" BIG_BIRD_PRETRAINED_MODEL_ARCHIVE_LIST = [ "google/bigbird-roberta-base", "google/bigbird-roberta-large", "google/bigbird-base-trivia-itc", # See all BigBird models at https://huggingface.co/models?filter=big_bird ] _TRIVIA_QA_MAPPING = { "big_bird_attention": "attention/self", "output_layer_norm": "output/LayerNorm", "attention_output": "attention/output/dense", "output": "output/dense", "self_attention_layer_norm": "attention/output/LayerNorm", "intermediate": "intermediate/dense", "word_embeddings": "bert/embeddings/word_embeddings", "position_embedding": "bert/embeddings/position_embeddings", "type_embeddings": "bert/embeddings/token_type_embeddings", "embeddings": "bert/embeddings", "layer_normalization": "output/LayerNorm", "layer_norm": "LayerNorm", "trivia_qa_head": "qa_classifier", "dense": "intermediate/dense", "dense_1": "qa_outputs", } def load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=False): """Load tf checkpoints in a pytorch model.""" def load_tf_weights_bert(init_vars, tf_path): names = [] tf_weights = {} for name, shape in init_vars: array = tf.train.load_variable(tf_path, name) name = name.replace("bert/encoder/LayerNorm", "bert/embeddings/LayerNorm") logger.info(f"Loading TF weight {name} with shape {shape}") names.append(name) tf_weights[name] = array return names, tf_weights def load_tf_weights_trivia_qa(init_vars): names = [] tf_weights = {} for i, var in enumerate(init_vars): name_items = var.name.split("/") if "transformer_scaffold" in name_items[0]: layer_name_items = name_items[0].split("_") if len(layer_name_items) < 3: layer_name_items += [0] name_items[0] = f"bert/encoder/layer_{layer_name_items[2]}" name = "/".join([_TRIVIA_QA_MAPPING[x] if x in _TRIVIA_QA_MAPPING else x for x in name_items])[ :-2 ] # remove last :0 in variable if "self/attention/output" in name: name = 
name.replace("self/attention/output", "output") if i >= len(init_vars) - 2: name = name.replace("intermediate", "output") logger.info(f"Loading TF weight {name} with shape {var.shape}") array = var.value().numpy() names.append(name) tf_weights[name] = array return names, tf_weights try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." ) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.saved_model.load(tf_path).variables if is_trivia_qa else tf.train.list_variables(tf_path) if len(init_vars) <= 0: raise ValueError("Loaded trained variables cannot be empty.") pt_names = list(model.state_dict().keys()) if is_trivia_qa: names, tf_weights = load_tf_weights_trivia_qa(init_vars) else: names, tf_weights = load_tf_weights_bert(init_vars, tf_path) for txt_name in names: array = tf_weights[txt_name] name = txt_name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any( n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] for n in name ): logger.info(f"Skipping {'/'.join(name)}") continue pointer = model pt_name = [] for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == "gamma": pointer = getattr(pointer, "weight") pt_name.append("weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") pt_name.append("bias") elif scope_names[0] == "output_weights": pointer = getattr(pointer, "weight") pt_name.append("weight") elif scope_names[0] == "squad": pointer = getattr(pointer, "classifier") pt_name.append("classifier") elif scope_names[0] == "transform": pointer = getattr(pointer, "transform") pt_name.append("transform") if ("bias" in name) or ("kernel" in name): pointer = getattr(pointer, "dense") pt_name.append("dense") elif ("beta" in name) or ("gamma" in name): pointer = getattr(pointer, "LayerNorm") pt_name.append("LayerNorm") else: try: pointer = getattr(pointer, scope_names[0]) pt_name.append(f"{scope_names[0]}") except AttributeError: logger.info(f"Skipping {m_name}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] pt_name.append(f"{num}") if m_name[-11:] == "_embeddings" or m_name == "embeddings": pointer = getattr(pointer, "weight") pt_name.append("weight") elif m_name == "kernel": array = np.transpose(array) try: if len(array.shape) > len(pointer.shape) and math.prod(array.shape) == math.prod(pointer.shape): # print(txt_name, array.shape) if ( txt_name.endswith("attention/self/key/kernel") or txt_name.endswith("attention/self/query/kernel") or txt_name.endswith("attention/self/value/kernel") ): array = array.transpose(1, 0, 2).reshape(pointer.shape) elif txt_name.endswith("attention/output/dense/kernel"): array = array.transpose(0, 2, 1).reshape(pointer.shape) else: array = array.reshape(pointer.shape) if pointer.shape != array.shape: raise ValueError( f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched of {txt_name}." 
) except AssertionError as e: e.args += (pointer.shape, array.shape) raise pt_weight_name = ".".join(pt_name) logger.info(f"Initialize PyTorch weight {pt_weight_name} from {txt_name}.") pointer.data = torch.from_numpy(array) tf_weights.pop(txt_name, None) pt_names.remove(pt_weight_name) logger.info(f"Weights not copied to PyTorch model: {', '.join(tf_weights.keys())}.") logger.info(f"Weights not initialized in PyTorch model: {', '.join(pt_names)}.") return model class BigBirdEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.__init__ def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False ) # End copy self.rescale_embeddings = config.rescale_embeddings self.hidden_size = config.hidden_size def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if self.rescale_embeddings: inputs_embeds = inputs_embeds * (self.hidden_size**0.5) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.dropout(embeddings) embeddings = self.LayerNorm(embeddings) return embeddings class BigBirdSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) 
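# Added note: standard multi-head bookkeeping follows — the hidden size is split evenly across
# `num_attention_heads`, so each head operates on an `attention_head_size`-dimensional slice and the
# concatenation of all heads (`all_head_size`) matches the hidden size again.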
self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BigBirdModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs class BigBirdBlockSparseAttention(nn.Module): def __init__(self, config, seed=None): super().__init__() self.max_seqlen = config.max_position_embeddings self.seed = seed if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size {config.hidden_size} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." ) self.num_attention_heads = config.num_attention_heads self.num_random_blocks = config.num_random_blocks self.block_size = config.block_size self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, band_mask=None, from_mask=None, to_mask=None, from_blocked_mask=None, to_blocked_mask=None, output_attentions=None, ): # Currently this `class` can't be used in decoder. 
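# Added note: the block-sparse pattern implemented below mixes global, sliding-window and random
# attention; it is not causal, which is why this attention type cannot be used in a decoder.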
batch_size, seqlen, _ = hidden_states.size() to_seq_length = from_seq_length = seqlen from_block_size = to_block_size = self.block_size if from_seq_length % from_block_size != 0: raise ValueError("Query sided sequence length must be multiple of block size") if to_seq_length % to_block_size != 0: raise ValueError("Key/Value sided sequence length must be multiple of block size") query_layer = self.transpose_for_scores(self.query(hidden_states)) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) context_layer, attention_probs = self.bigbird_block_sparse_attention( query_layer, key_layer, value_layer, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, self.num_attention_heads, self.num_random_blocks, self.attention_head_size, from_block_size, to_block_size, batch_size, from_seq_length, to_seq_length, seed=self.seed, plan_from_length=None, plan_num_rand_blocks=None, output_attentions=output_attentions, ) context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs @staticmethod def torch_bmm_nd(inp_1, inp_2, ndim=None): """Fast nd matrix multiplication""" # faster replacement of torch.einsum ("bhqk,bhkd->bhqd") return torch.bmm(inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:])).view( inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 1]) ) @staticmethod def torch_bmm_nd_transpose(inp_1, inp_2, ndim=None): """Fast nd matrix multiplication with transpose""" # faster replacement of torch.einsum (bhqd,bhkd->bhqk) return torch.bmm( inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:]).transpose(1, 2) ).view(inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 2])) def bigbird_block_sparse_attention( self, query_layer, key_layer, value_layer, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, n_heads, n_rand_blocks, attention_head_size, from_block_size, to_block_size, batch_size, from_seq_len, to_seq_len, seed, plan_from_length, plan_num_rand_blocks, output_attentions, ): # BigBird block-sparse attention as suggested in paper # ITC: # global tokens: 2 x block_size # window tokens: 3 x block_size # random tokens: num_rand_tokens x block_size # ETC: # global tokens: extra_globals_tokens + 2 x block_size # window tokens: 3 x block_size # random tokens: num_rand_tokens x block_size # Note: # 1) Currently, ETC is not supported. # 2) Window size is fixed to 3 blocks & it can be changed only by # changing `block_size`. # 3) Number of global blocks are fixed (2 blocks here) & global tokens can be # controlled only by `block_size`. # attention is calculated separately for q[0], q[1], q[2:-2], q[-2], q[-1] in order to use special trick of shifting tokens (for calculating sliding attention) # hence following code can be divided into 5 parts. 
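# Illustrative numbers (added comment, assumed for clarity): with block_size=64 and seq_len=1024
# there are 16 blocks per sequence; each "middle" query block (q[2:-2]) then attends to 2 global
# blocks, 3 sliding-window blocks and `n_rand_blocks` random blocks, i.e.
# (5 + n_rand_blocks) * 64 key positions per query block.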
if from_seq_len // from_block_size != to_seq_len // to_block_size: raise ValueError("Error the number of blocks needs to be same!") rsqrt_d = 1 / math.sqrt(attention_head_size) bsz = batch_size attn_mask_penalty = -10000.0 # generate random attention and corresponding masks np.random.seed(seed) if from_seq_len in [1024, 3072, 4096]: # old plans used in paper rand_attn = [ self._bigbird_block_rand_mask( self.max_seqlen, self.max_seqlen, from_block_size, to_block_size, n_rand_blocks, last_idx=1024 )[: (from_seq_len // from_block_size - 2)] for _ in range(n_heads) ] else: if plan_from_length is None: plan_from_length, plan_num_rand_blocks = self._get_rand_attn_plan( from_seq_len, from_block_size, n_rand_blocks ) rand_attn = self._bigbird_block_rand_mask_with_head( from_seq_length=from_seq_len, to_seq_length=to_seq_len, from_block_size=from_block_size, to_block_size=to_block_size, num_heads=n_heads, plan_from_length=plan_from_length, plan_num_rand_blocks=plan_num_rand_blocks, ) rand_attn = np.stack(rand_attn, axis=0) rand_attn = torch.tensor(rand_attn, device=query_layer.device, dtype=torch.long) rand_attn.unsqueeze_(0) rand_attn = torch.cat([rand_attn for _ in range(batch_size)], dim=0) rand_mask = self._create_rand_mask_from_inputs( from_blocked_mask, to_blocked_mask, rand_attn, n_heads, n_rand_blocks, bsz, from_seq_len, from_block_size ) blocked_query_matrix = query_layer.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, -1) blocked_key_matrix = key_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1) blocked_value_matrix = value_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1) # preparing block for randn attn gathered_key = self.torch_gather_b2(blocked_key_matrix, rand_attn) gathered_key = gathered_key.view( bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1 ) # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1] gathered_value = self.torch_gather_b2(blocked_value_matrix, rand_attn) gathered_value = gathered_value.view( bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1 ) # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1] # 1st PART # 1st block (global block) attention scores # q[0] x (k[0], k[1], k[2], k[3], k[4] .... 
) # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len] first_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 0], key_layer, ndim=4) first_product = first_product * rsqrt_d first_product += (1.0 - to_mask) * attn_mask_penalty first_attn_weights = nn.functional.softmax( first_product, dim=-1 ) # [bsz, n_heads, from_block_size, to_seq_len] # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1] first_context_layer = self.torch_bmm_nd(first_attn_weights, value_layer, ndim=4) first_context_layer.unsqueeze_(2) # 2nd PART # 2nd block attention scores # q[1] x (sliding_keys, random_keys, global_keys) # sliding key blocks -> 2nd, 3rd blocks # global key blocks -> 1st block second_key_mat = torch.cat( [ blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, 1], blocked_key_matrix[:, :, 2], blocked_key_matrix[:, :, -1], gathered_key[:, :, 0], ], dim=2, ) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] second_value_mat = torch.cat( [ blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, 1], blocked_value_matrix[:, :, 2], blocked_value_matrix[:, :, -1], gathered_value[:, :, 0], ], dim=2, ) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] second_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 1], second_key_mat, ndim=4) second_seq_pad = torch.cat( [ to_mask[:, :, :, : 3 * to_block_size], to_mask[:, :, :, -to_block_size:], to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]), ], dim=3, ) second_rand_pad = torch.cat( [ rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]), rand_mask[:, :, 0], ], dim=3, ) second_product = second_product * rsqrt_d second_product += (1.0 - torch.minimum(second_seq_pad, second_rand_pad)) * attn_mask_penalty second_attn_weights = nn.functional.softmax( second_product, dim=-1 ) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1] second_context_layer = self.torch_bmm_nd(second_attn_weights, second_value_mat, ndim=4) second_context_layer.unsqueeze_(2) # 3rd PART # Middle blocks attention scores # q[-2:2] x (sliding_keys, random_keys, global_keys) # sliding attn is calculated using special trick of shifting tokens as discussed in paper # random keys are generated by taking random indices as per `rand_attn` # global keys -> 1st & last block exp_blocked_key_matrix = torch.cat( [blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2], blocked_key_matrix[:, :, 3:-1]], dim=3 ) # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] exp_blocked_value_matrix = torch.cat( [blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2], blocked_value_matrix[:, :, 3:-1]], dim=3, ) # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] middle_query_matrix = blocked_query_matrix[:, :, 2:-2] # sliding attention scores for q[-2:2] # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [b, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] inner_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, exp_blocked_key_matrix, ndim=5) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, 
from_block_size, 3*to_block_size] inner_band_product = inner_band_product * rsqrt_d # randn attention scores for q[-2:2] # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1] rand_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, gathered_key[:, :, 1:-1], ndim=5) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size] rand_band_product = rand_band_product * rsqrt_d # Including 1st block (since it's global) first_band_product = torch.einsum( "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, 0] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] first_band_product = first_band_product * rsqrt_d # Including last block (since it's global) last_band_product = torch.einsum( "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, -1] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] last_band_product = last_band_product * rsqrt_d # masking padded tokens inner_band_product += (1.0 - band_mask) * attn_mask_penalty first_band_product += (1.0 - to_mask[:, :, :, :to_block_size].unsqueeze(3)) * attn_mask_penalty last_band_product += (1.0 - to_mask[:, :, :, -to_block_size:].unsqueeze(3)) * attn_mask_penalty rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * attn_mask_penalty # completing attention scores matrix for all q[-2:2] band_product = torch.cat( [first_band_product, inner_band_product, rand_band_product, last_band_product], dim=-1 ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size] # safely doing softmax since attention matrix is completed attn_weights = nn.functional.softmax( band_product, dim=-1 ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size] # contribution of sliding keys # [bsz, n_heads, m//from_block_size-4, from_block_size, 3*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] context_layer = self.torch_bmm_nd( attn_weights[:, :, :, :, to_block_size : 4 * to_block_size], exp_blocked_value_matrix, ndim=5 ) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] # adding contribution of random keys # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1] context_layer += self.torch_bmm_nd( attn_weights[:, :, :, :, 4 * to_block_size : -to_block_size], gathered_value[:, :, 1:-1], ndim=5 ) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] # adding contribution of global keys context_layer += torch.einsum( "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, :to_block_size], blocked_value_matrix[:, :, 0] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] context_layer += torch.einsum( "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, -to_block_size:], blocked_value_matrix[:, :, -1] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> 
[bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] # 4th PART # last 2nd token attention scores # q[-2] x (sliding_keys, random_keys, global_keys) # sliding key blocks -> last 3 blocks # global key block -> 1st block # random key block -> based on indices stored in `randn_attn` second_last_key_mat = torch.cat( [ blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, -3], blocked_key_matrix[:, :, -2], blocked_key_matrix[:, :, -1], gathered_key[:, :, -1], ], dim=2, ) # [bsz, n_heads, (4+n_random_blocks)*to_block_size, -1] second_last_value_mat = torch.cat( [ blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, -3], blocked_value_matrix[:, :, -2], blocked_value_matrix[:, :, -1], gathered_value[:, :, -1], ], dim=2, ) # [bsz, n_heads, (4+r)*to_block_size, -1] # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] second_last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -2], second_last_key_mat, ndim=4) second_last_seq_pad = torch.cat( [ to_mask[:, :, :, :to_block_size], to_mask[:, :, :, -3 * to_block_size :], to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]), ], dim=3, ) second_last_rand_pad = torch.cat( [ rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]), rand_mask[:, :, -1], ], dim=3, ) second_last_product = second_last_product * rsqrt_d second_last_product += (1.0 - torch.minimum(second_last_seq_pad, second_last_rand_pad)) * attn_mask_penalty second_last_attn_weights = nn.functional.softmax( second_last_product, dim=-1 ) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1] second_last_context_layer = self.torch_bmm_nd(second_last_attn_weights, second_last_value_mat, ndim=4) second_last_context_layer.unsqueeze_(2) # 5th PART # last block (global) attention scores # q[-1] x (k[0], k[1], k[2], k[3], .... 
) # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len] last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -1], key_layer, ndim=4) last_product = last_product * rsqrt_d last_product += (1.0 - to_mask) * attn_mask_penalty last_attn_weights = nn.functional.softmax(last_product, dim=-1) # [bsz, n_heads, from_block_size, n] # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1] last_context_layer = self.torch_bmm_nd(last_attn_weights, value_layer, ndim=4) last_context_layer.unsqueeze_(2) # combining representations of all tokens context_layer = torch.cat( [first_context_layer, second_context_layer, context_layer, second_last_context_layer, last_context_layer], dim=2, ) context_layer = context_layer.view((bsz, n_heads, from_seq_len, -1)) * from_mask context_layer = torch.transpose(context_layer, 1, 2) # this is just for visualizing; forward pass doesn't depend on following code if output_attentions: # TODO(PVP): need to verify if below code is correct attention_probs = torch.zeros( bsz, n_heads, from_seq_len, to_seq_len, dtype=torch.float, device=context_layer.device ) # 1st query block # corresponding to `first_context_layer` attention_probs[:, :, :from_block_size, :] = first_attn_weights # all keys global # 2nd query block # corresponding to `second_context_layer` attention_probs[:, :, from_block_size : 2 * from_block_size, : 3 * to_block_size] = second_attn_weights[ :, :, :, : 3 * to_block_size ] # 1st three key blocks (global + sliding) attention_probs[:, :, from_block_size : 2 * from_block_size, -to_block_size:] = second_attn_weights[ :, :, :, 3 * to_block_size : 4 * to_block_size ] # last key block (global) # random keys for p1, i1, w1 in zip(range(bsz), rand_attn, second_attn_weights): # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch for p2, i2, w2 in zip(range(n_heads), i1, w1): # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, ) right_slice = w2[:, 4 * to_block_size :] attn_probs_view[p1, p2, 1, :, i2[0]] = right_slice.view( from_block_size, n_rand_blocks, to_block_size ) # Middle query blocks # corresponding to `context_layer` # sliding keys for q_idx in range(from_seq_len // from_block_size - 4): attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, )[:, :, 2:-2, :, 1:-1, :] right_slice = attn_weights[:, :, q_idx, :, to_block_size : 4 * to_block_size] attn_probs_view[:, :, q_idx, :, q_idx : q_idx + 3, :] = right_slice.view( bsz, n_heads, from_block_size, 3, to_block_size ) # inner_band_product # global keys (corresponding to 1st key block) attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, :to_block_size] = attn_weights[ :, :, :, :, :to_block_size ].view( bsz, n_heads, -1, to_block_size ) # first_band_product # global keys (corresponding to last key block) attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, -to_block_size:] = attn_weights[ :, :, :, :, -to_block_size: ].view( bsz, n_heads, -1, to_block_size ) # last_band_product # random keys for p1, i1, w1 in zip(range(bsz), rand_attn, attn_weights): # p1, i1, w1 corresponds to batch_dim i.e. 
following operation is done for each sequence in batch for p2, i2, w2 in zip(range(n_heads), i1, w1): # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads for q_idx in range(1, len(i2) - 1): attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, ) right_slice = w2[q_idx - 1, :, 4 * to_block_size : -to_block_size] attn_probs_view[p1, p2, q_idx + 1, :, i2[q_idx]] = right_slice.view( from_block_size, n_rand_blocks, to_block_size ) # Second-last query block # corresponding to `second_last_context_layer` attention_probs[:, :, -2 * from_block_size : -from_block_size, :to_block_size] = second_last_attn_weights[ :, :, :, :to_block_size ] # 1st key block (global) attention_probs[ :, :, -2 * from_block_size : -from_block_size, -3 * to_block_size : ] = second_last_attn_weights[ :, :, :, to_block_size : 4 * to_block_size ] # last three blocks (global + sliding) # random keys for p1, i1, w1 in zip(range(bsz), rand_attn, second_last_attn_weights): # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch for p2, i2, w2 in zip(range(n_heads), i1, w1): # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, ) right_slice = w2[:, 4 * to_block_size :] attn_probs_view[p1, p2, -2, :, i2[-1]] = right_slice.view( from_block_size, n_rand_blocks, to_block_size ) # last query block # corresponding to `last_context_layer` attention_probs[:, :, -from_block_size:, :] = last_attn_weights # all keys global else: attention_probs = None return context_layer, attention_probs @staticmethod def torch_gather_b2(params, indices): # this operation is equivalent to tf.gather when batch_dims=2 if params.shape[:2] != indices.shape[:2]: raise ValueError( "Make sure that the first two dimensions of params and indices are identical, but" f" they are params: {params.shape[:2]} vs. indices: {indices.shape[:2]}" ) num_indices_to_gather = indices.shape[-2] * indices.shape[-1] num_indices_to_pick_from = params.shape[2] shift = torch.arange(indices.shape[0] * indices.shape[1] * num_indices_to_gather, device=indices.device) indices_shift = torch_int_div(shift, num_indices_to_gather) * num_indices_to_pick_from flattened_indices = indices.view(-1) + indices_shift flattened_params = params.reshape(-1, params.shape[-2], params.shape[-1]) out_flattened = flattened_params.index_select(0, flattened_indices) out = out_flattened.reshape(params.shape[:2] + (num_indices_to_gather,) + params.shape[3:]) return out @staticmethod def _create_rand_mask_from_inputs( from_blocked_mask, to_blocked_mask, rand_attn, num_attention_heads, num_rand_blocks, batch_size, from_seq_length, from_block_size, ): """ Create 3D attention mask from a 2D tensor mask. Args: from_blocked_mask: 2D Tensor of shape [batch_size, from_seq_length//from_block_size, from_block_size]. to_blocked_mask: int32 Tensor of shape [batch_size, to_seq_length//to_block_size, to_block_size]. rand_attn: [batch_size, num_attention_heads, from_seq_length//from_block_size-2, num_rand_blocks] num_attention_heads: int. Number of attention heads. num_rand_blocks: int. Number of random chunks per row. batch_size: int. Batch size for computation. from_seq_length: int. length of from sequence. from_block_size: int. size of block in from sequence. 
Returns: float Tensor of shape [batch_size, num_attention_heads, from_seq_length//from_block_size-2, from_block_size, num_rand_blocks*to_block_size]. """ num_windows = from_seq_length // from_block_size - 2 rand_mask = torch.stack([p1[i1.flatten()] for p1, i1 in zip(to_blocked_mask, rand_attn)]) rand_mask = rand_mask.view(batch_size, num_attention_heads, num_windows, num_rand_blocks * from_block_size) rand_mask = torch.einsum("blq,bhlk->bhlqk", from_blocked_mask[:, 1:-1], rand_mask) return rand_mask @staticmethod def _get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks): """ Gives the plan of where to put random attention. Args: from_seq_length: int. length of from sequence. from_block_size: int. size of block in from sequence. num_rand_blocks: int. Number of random chunks per row. Returns: plan_from_length: ending location of from block plan_num_rand_blocks: number of random ending location for each block """ plan_from_length = [] plan_num_rand_blocks = [] if (2 * num_rand_blocks + 5) < (from_seq_length // from_block_size): plan_from_length.append(int((2 * num_rand_blocks + 5) * from_block_size)) plan_num_rand_blocks.append(num_rand_blocks) plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(0) elif (num_rand_blocks + 5) < (from_seq_length // from_block_size): plan_from_length.append(int((num_rand_blocks + 5) * from_block_size)) plan_num_rand_blocks.append(num_rand_blocks // 2) plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(num_rand_blocks - (num_rand_blocks // 2)) else: plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(num_rand_blocks) return plan_from_length, plan_num_rand_blocks @staticmethod def _bigbird_block_rand_mask( from_seq_length, to_seq_length, from_block_size, to_block_size, num_rand_blocks, last_idx=-1 ): """ Create adjacency list of random attention. Args: from_seq_length: int. length of from sequence. to_seq_length: int. length of to sequence. from_block_size: int. size of block in from sequence. to_block_size: int. size of block in to sequence. num_rand_blocks: int. Number of random chunks per row. last_idx: if -1 then num_rand_blocks blocks chosen anywhere in to sequence, if positive then num_rand_blocks blocks chosen only up to last_idx. 
Returns: adjacency list of size from_seq_length//from_block_size-2 by num_rand_blocks """ # using this method when from_seq_length in [1024, 3072, 4096] if from_seq_length // from_block_size != to_seq_length // to_block_size: raise ValueError("Error the number of blocks needs to be same!") rand_attn = np.zeros((from_seq_length // from_block_size - 2, num_rand_blocks), dtype=np.int32) middle_seq = np.arange(1, to_seq_length // to_block_size - 1, dtype=np.int32) last = to_seq_length // to_block_size - 1 if last_idx > (2 * to_block_size): last = (last_idx // to_block_size) - 1 r = num_rand_blocks # shorthand for i in range(1, from_seq_length // from_block_size - 1): start = i - 2 end = i if i == 1: rand_attn[i - 1, :] = np.random.permutation(middle_seq[2:last])[:r] elif i == 2: rand_attn[i - 1, :] = np.random.permutation(middle_seq[3:last])[:r] elif i == from_seq_length // from_block_size - 3: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r] # Missing -3: should have been sliced till last-3 elif i == from_seq_length // from_block_size - 2: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r] # Missing -4: should have been sliced till last-4 else: if start > last: start = last rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r] elif (end + 1) == last: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r] else: rand_attn[i - 1, :] = np.random.permutation( np.concatenate((middle_seq[:start], middle_seq[end + 1 : last])) )[:r] return rand_attn def _bigbird_block_rand_mask_with_head( self, from_seq_length, to_seq_length, from_block_size, to_block_size, num_heads, plan_from_length, plan_num_rand_blocks, window_block_left=1, window_block_right=1, global_block_top=1, global_block_bottom=1, global_block_left=1, global_block_right=1, ): """ Create adjacency list of random attention. Args: from_seq_length: int. length of from sequence. to_seq_length: int. length of to sequence. from_block_size: int. size of block in from sequence. to_block_size: int. size of block in to sequence. num_heads: int. total number of heads. plan_from_length: list. plan from length where num_random_blocks are chosen from. plan_num_rand_blocks: list. number of rand blocks within the plan. window_block_left: int. number of blocks of window to left of a block. window_block_right: int. number of blocks of window to right of a block. global_block_top: int. number of blocks at the top. global_block_bottom: int. number of blocks at the bottom. global_block_left: int. Number of blocks globally used to the left. global_block_right: int. Number of blocks globally used to the right. 
Returns: adjacency list of size num_head where each element is of size from_seq_length//from_block_size-2 by num_rand_blocks """ # using this method when from_seq_length not in [1024, 3072, 4096] if from_seq_length // from_block_size != to_seq_length // to_block_size: raise ValueError("Error the number of blocks needs to be same!") if from_seq_length not in plan_from_length: raise ValueError("Error from sequence length not in plan!") # Total number of blocks in the mmask num_blocks = from_seq_length // from_block_size # Number of blocks per plan plan_block_length = np.array(plan_from_length) // from_block_size # till when to follow plan max_plan_idx = plan_from_length.index(from_seq_length) # Random Attention adjacency list rand_attn = [ np.zeros((num_blocks, np.sum(plan_num_rand_blocks[: max_plan_idx + 1])), dtype=np.int32) for i in range(num_heads) ] # We will go iteratively over the plan blocks and pick random number of # Attention blocks from the legally allowed blocks for plan_idx in range(max_plan_idx + 1): rnd_r_cnt = 0 if plan_idx > 0: # set the row for all from_blocks starting from 0 to # plan_block_length[plan_idx-1] # column indx start fromm plan_block_length[plan_idx-1] and ends at # plan_block_length[plan_idx] if plan_num_rand_blocks[plan_idx] > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx])) curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1])) for blk_rw_idx in range(global_block_top, plan_block_length[plan_idx - 1]): for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention( block_id=blk_rw_idx, to_start_block_id=plan_block_length[plan_idx - 1], to_end_block_id=plan_block_length[plan_idx], num_rand_blocks=plan_num_rand_blocks[plan_idx], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, ) for pl_id in range(plan_idx): if plan_num_rand_blocks[pl_id] == 0: continue for blk_rw_idx in range(plan_block_length[plan_idx - 1], plan_block_length[plan_idx]): rnd_r_cnt = 0 to_start_block_id = 0 if pl_id > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:pl_id])) to_start_block_id = plan_block_length[pl_id - 1] curr_r_cnt = int(np.sum(plan_num_rand_blocks[: pl_id + 1])) for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention( block_id=blk_rw_idx, to_start_block_id=to_start_block_id, to_end_block_id=plan_block_length[pl_id], num_rand_blocks=plan_num_rand_blocks[pl_id], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, ) if plan_num_rand_blocks[plan_idx] == 0: continue curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1])) from_start_block_id = global_block_top to_start_block_id = 0 if plan_idx > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx])) from_start_block_id = plan_block_length[plan_idx - 1] to_start_block_id = plan_block_length[plan_idx - 1] for blk_rw_idx in range(from_start_block_id, plan_block_length[plan_idx]): for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention( block_id=blk_rw_idx, to_start_block_id=to_start_block_id, to_end_block_id=plan_block_length[plan_idx], num_rand_blocks=plan_num_rand_blocks[plan_idx], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, ) for nh 
in range(num_heads): rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :] return rand_attn @staticmethod def _get_single_block_row_attention( block_id, to_start_block_id, to_end_block_id, num_rand_blocks, window_block_left=1, window_block_right=1, global_block_left=1, global_block_right=1, ): """ For a single row block get random row attention. Args: block_id: int. block id of row. to_start_block_id: int. random attention column start id. to_end_block_id: int. random attention column end id. num_rand_blocks: int. number of random blocks to be selected. window_block_left: int. number of blocks of window to left of a block. window_block_right: int. number of blocks of window to right of a block. global_block_left: int. Number of blocks globally used to the left. global_block_right: int. Number of blocks globally used to the right. Returns: row containing the random attention vector of size num_rand_blocks. """ # list of to_blocks from which to choose random attention to_block_list = np.arange(to_start_block_id, to_end_block_id, dtype=np.int32) # permute the blocks perm_block = np.random.permutation(to_block_list) # illegal blocks for the current block id, using window illegal_blocks = list(range(block_id - window_block_left, block_id + window_block_right + 1)) # Add blocks at the start and at the end illegal_blocks.extend(list(range(global_block_left))) illegal_blocks.extend(list(range(to_end_block_id - global_block_right, to_end_block_id))) # The second from_block cannot choose random attention on second last to_block if block_id == 1: illegal_blocks.append(to_end_block_id - 2) # The second last from_block cannot choose random attention on second to_block if block_id == to_end_block_id - 2: illegal_blocks.append(1) selected_random_blokcs = [] for i in range(to_end_block_id - to_start_block_id): if perm_block[i] not in illegal_blocks: selected_random_blokcs.append(perm_block[i]) if len(selected_random_blokcs) == num_rand_blocks: break return np.array(selected_random_blokcs, dtype=np.int32) # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->BigBird class BigBirdSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BigBirdAttention(nn.Module): def __init__(self, config, seed=None): super().__init__() self.attention_type = config.attention_type self.config = config self.seed = seed if self.config.attention_type == "original_full": self.self = BigBirdSelfAttention(config) elif self.config.attention_type == "block_sparse": self.self = BigBirdBlockSparseAttention(config, seed) else: raise ValueError( f"attention_type can either be original_full or block_sparse, but is {self.config.attention_type}" ) self.output = BigBirdSelfOutput(config) def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = value if value == 
"original_full": # copy all weights to new full attention class attn_weights = BigBirdSelfAttention(self.config) else: # copy all weights to new sparse attention class attn_weights = BigBirdBlockSparseAttention(self.config, self.seed) attn_weights.query = self.self.query attn_weights.value = self.self.value attn_weights.key = self.self.key self.self = attn_weights self.attention_type = value if not self.training: self.self.eval() def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, # block_sparse config band_mask=None, from_mask=None, to_mask=None, from_blocked_mask=None, to_blocked_mask=None, ): # fp16 compatibility if band_mask is not None: band_mask = band_mask.to(hidden_states.dtype) if from_mask is not None: from_mask = from_mask.to(hidden_states.dtype) if to_mask is not None: to_mask = to_mask.to(hidden_states.dtype) if self.attention_type == "original_full": self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) else: if encoder_hidden_states is not None: raise ValueError("BigBird cannot be used as a decoder when config.attention_type != 'original_full'") self_outputs = self.self( hidden_states, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, output_attentions ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->BigBird class BigBirdIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->BigBird class BigBirdOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BigBirdLayer(nn.Module): def __init__(self, config, seed=None): super().__init__() self.config = config self.attention_type = config.attention_type self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = BigBirdAttention(config, seed=seed) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise TypeError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = BigBirdAttention(config) self.intermediate = BigBirdIntermediate(config) self.output = BigBirdOutput(config) def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( 
f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = value self.attention.set_attention_type(value) if self.add_cross_attention: self.crossattention.set_attention_type(value) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, band_mask=None, from_mask=None, to_mask=None, blocked_encoder_mask=None, past_key_value=None, output_attentions=False, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_value=self_attn_past_key_value, output_attentions=output_attentions, band_mask=band_mask, from_mask=from_mask, to_mask=to_mask, from_blocked_mask=blocked_encoder_mask, to_blocked_mask=blocked_encoder_mask, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with " " cross-attention layers by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class BigBirdEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.attention_type = config.attention_type self.layer = nn.ModuleList( [BigBirdLayer(config, seed=layer_idx) for layer_idx in range(config.num_hidden_layers)] ) self.gradient_checkpointing = False def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = 
value for layer in self.layer: layer.set_attention_type(value) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, band_mask=None, from_mask=None, to_mask=None, blocked_encoder_mask=None, return_dict=True, ) -> Union[BaseModelOutputWithPastAndCrossAttentions, Tuple]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, band_mask, from_mask, to_mask, blocked_encoder_mask, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, band_mask, from_mask, to_mask, blocked_encoder_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->BigBird class BigBirdPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->BigBird class BigBirdLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = BigBirdPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each 
token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->BigBird class BigBirdOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = BigBirdLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores # Copied from transformers.models.bert.modeling_bert.BertOnlyNSPHead with Bert->BigBird class BigBirdOnlyNSPHead(nn.Module): def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score # Copied from transformers.models.bert.modeling_bert.BertPreTrainingHeads with Bert->BigBird class BigBirdPreTrainingHeads(nn.Module): def __init__(self, config): super().__init__() self.predictions = BigBirdLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class BigBirdPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = BigBirdConfig load_tf_weights = load_tf_weights_in_big_bird base_model_prefix = "bert" supports_gradient_checkpointing = True _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, BigBirdEncoder): module.gradient_checkpointing = value BIG_BIRD_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`BigBirdConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ BIG_BIRD_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. 
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @dataclass class BigBirdForPreTrainingOutput(ModelOutput): """ Output type of [`BigBirdForPreTraining`]. Args: loss (*optional*, returned when `labels` is provided, `torch.FloatTensor` of shape `(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None prediction_logits: torch.FloatTensor = None seq_relationship_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class BigBirdForQuestionAnsweringModelOutput(ModelOutput): """ Base class for outputs of question answering models. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Span-start scores (before SoftMax). end_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Span-end scores (before SoftMax). pooler_output (`torch.FloatTensor` of shape `(batch_size, 1)`): pooler output from BigBigModel hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None start_logits: torch.FloatTensor = None end_logits: torch.FloatTensor = None pooler_output: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @add_start_docstrings( "The bare BigBird Model transformer outputting raw hidden-states without any specific head on top.", BIG_BIRD_START_DOCSTRING, ) class BigBirdModel(BigBirdPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in [Attention is all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. 
""" def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.attention_type = self.config.attention_type self.config = config self.block_size = self.config.block_size self.embeddings = BigBirdEmbeddings(config) self.encoder = BigBirdEncoder(config) if add_pooling_layer: self.pooler = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() else: self.pooler = None self.activation = None if self.attention_type != "original_full" and config.add_cross_attention: logger.warning( "When using `BigBirdForCausalLM` as decoder, then `attention_type` must be `original_full`. Setting" " `attention_type=original_full`" ) self.set_attention_type("original_full") # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = value self.encoder.set_attention_type(value) @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[BaseModelOutputWithPoolingAndCrossAttentions, Tuple[torch.FloatTensor]]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # in order to use block_sparse attention, sequence_length has to be at least # bigger than all global attentions: 2 * block_size # + sliding tokens: 3 * block_size # + random tokens: 2 * num_random_blocks * block_size max_tokens_to_attend = (5 + 2 * self.config.num_random_blocks) * self.config.block_size if self.attention_type == "block_sparse" and seq_length <= max_tokens_to_attend: # change attention_type from block_sparse to original_full sequence_length = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1) logger.warning( "Attention type 'block_sparse' is not possible if sequence_length: " f"{sequence_length} <= num global tokens: 2 * config.block_size " "+ min. num sliding tokens: 3 * config.block_size " "+ config.num_random_blocks * config.block_size " "+ additional buffer: config.num_random_blocks * config.block_size " f"= {max_tokens_to_attend} with config.block_size " f"= {self.config.block_size}, config.num_random_blocks " f"= {self.config.num_random_blocks}. " "Changing attention type to 'original_full'..." 
) self.set_attention_type("original_full") if self.attention_type == "block_sparse": ( padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds, ) = self._pad_to_block_size( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, pad_token_id=self.config.pad_token_id, ) else: padding_len = 0 if self.attention_type == "block_sparse": blocked_encoder_mask, band_mask, from_mask, to_mask = self.create_masks_for_block_sparse_attn( attention_mask, self.block_size ) extended_attention_mask = None elif self.attention_type == "original_full": blocked_encoder_mask = None band_mask = None from_mask = None to_mask = None # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) else: raise ValueError( f"attention_type can either be original_full or block_sparse, but is {self.attention_type}" ) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, band_mask=band_mask, from_mask=from_mask, to_mask=to_mask, blocked_encoder_mask=blocked_encoder_mask, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooler_output = self.activation(self.pooler(sequence_output[:, 0, :])) if (self.pooler is not None) else None # undo padding if padding_len > 0: # unpad `sequence_output` because the calling function is expecting a length == input_ids.size(1) sequence_output = sequence_output[:, :-padding_len] if not return_dict: return (sequence_output, pooler_output) + encoder_outputs[1:] return BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooler_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @staticmethod def create_masks_for_block_sparse_attn(attention_mask: 
torch.Tensor, block_size: int): batch_size, seq_length = attention_mask.size() if seq_length % block_size != 0: raise ValueError( f"Sequence length must be multiple of block size, but sequence length is {seq_length}, while block" f" size is {block_size}." ) def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask): """ Create 3D attention mask from a 2D tensor mask. Args: from_blocked_mask: 2D Tensor of shape [batch_size, from_seq_length//from_block_size, from_block_size]. to_blocked_mask: int32 Tensor of shape [batch_size, to_seq_length//to_block_size, to_block_size]. Returns: float Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4, from_block_size, 3*to_block_size]. """ exp_blocked_to_pad = torch.cat( [to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:, 3:-1]], dim=2 ) band_mask = torch.einsum("blq,blk->blqk", from_blocked_mask[:, 2:-2], exp_blocked_to_pad) band_mask.unsqueeze_(1) return band_mask blocked_encoder_mask = attention_mask.view(batch_size, seq_length // block_size, block_size) band_mask = create_band_mask_from_inputs(blocked_encoder_mask, blocked_encoder_mask) from_mask = attention_mask.view(batch_size, 1, seq_length, 1) to_mask = attention_mask.view(batch_size, 1, 1, seq_length) return blocked_encoder_mask, band_mask, from_mask, to_mask def _pad_to_block_size( self, input_ids: torch.Tensor, attention_mask: torch.Tensor, token_type_ids: torch.Tensor, position_ids: torch.Tensor, inputs_embeds: torch.Tensor, pad_token_id: int, ): """A helper function to pad tokens and mask to work with implementation of BigBird block-sparse attention.""" # padding block_size = self.config.block_size input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape batch_size, seq_len = input_shape[:2] padding_len = (block_size - seq_len % block_size) % block_size if padding_len > 0: logger.info( f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of " f"`config.block_size`: {block_size}" ) if input_ids is not None: input_ids = nn.functional.pad(input_ids, (0, padding_len), value=pad_token_id) if position_ids is not None: # pad with position_id = pad_token_id as in modeling_bigbird.BigBirdEmbeddings position_ids = nn.functional.pad(position_ids, (0, padding_len), value=pad_token_id) if inputs_embeds is not None: input_ids_padding = inputs_embeds.new_full( (batch_size, padding_len), self.config.pad_token_id, dtype=torch.long, ) inputs_embeds_padding = self.embeddings(input_ids_padding) inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2) attention_mask = nn.functional.pad( attention_mask, (0, padding_len), value=False ) # no attention on the padding tokens token_type_ids = nn.functional.pad(token_type_ids, (0, padding_len), value=0) # pad with token_type_id = 0 return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds class BigBirdForPreTraining(BigBirdPreTrainedModel): _keys_to_ignore_on_load_missing = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] def __init__(self, config): super().__init__(config) self.bert = BigBirdModel(config, add_pooling_layer=True) self.cls = BigBirdPreTrainingHeads(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, 
sequence_length")) @replace_return_docstrings(output_type=BigBirdForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.FloatTensor] = None, next_sentence_label: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[BigBirdForPreTrainingOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the next sequence prediction (classification) loss. If specified, nsp loss will be added to masked_lm loss. Input should be a sequence pair (see `input_ids` docstring) Indices should be in `[0, 1]`: - 0 indicates sequence B is a continuation of sequence A, - 1 indicates sequence B is a random sequence. kwargs (`Dict[str, any]`, optional, defaults to *{}*): Used to hide legacy arguments that have been deprecated. Returns: Example: ```python >>> from transformers import AutoTokenizer, BigBirdForPreTraining >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-roberta-base") >>> model = BigBirdForPreTraining.from_pretrained("google/bigbird-roberta-base") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.prediction_logits >>> seq_relationship_logits = outputs.seq_relationship_logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output, pooled_output = outputs[:2] prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output) total_loss = None if labels is not None: loss_fct = CrossEntropyLoss() total_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if next_sentence_label is not None and total_loss is not None: next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) total_loss = total_loss + next_sentence_loss if not return_dict: output = (prediction_scores, seq_relationship_score) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return BigBirdForPreTrainingOutput( loss=total_loss, prediction_logits=prediction_scores, seq_relationship_logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings("""BigBird Model with a `language modeling` head on top.""", BIG_BIRD_START_DOCSTRING) class BigBirdForMaskedLM(BigBirdPreTrainedModel): _keys_to_ignore_on_load_missing = ["cls.predictions.decoder.weight", 
"cls.predictions.decoder.bias"] def __init__(self, config): super().__init__(config) if config.is_decoder: logger.warning( "If you want to use `BigBirdForMaskedLM` make sure `config.is_decoder=False` for " "bi-directional self-attention." ) self.bert = BigBirdModel(config) self.cls = BigBirdOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[MaskedLMOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Returns: Example: ```python >>> import torch >>> from transformers import AutoTokenizer, BigBirdForMaskedLM >>> from datasets import load_dataset >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-roberta-base") >>> model = BigBirdForMaskedLM.from_pretrained("google/bigbird-roberta-base") >>> squad_ds = load_dataset("squad_v2", split="train") # doctest: +IGNORE_RESULT >>> # select random long article >>> LONG_ARTICLE_TARGET = squad_ds[81514]["context"] >>> # select random sentence >>> LONG_ARTICLE_TARGET[332:398] 'the highest values are very close to the theoretical maximum value' >>> # add mask_token >>> LONG_ARTICLE_TO_MASK = LONG_ARTICLE_TARGET.replace("maximum", "[MASK]") >>> inputs = tokenizer(LONG_ARTICLE_TO_MASK, return_tensors="pt") >>> # long article input >>> list(inputs["input_ids"].shape) [1, 919] >>> with torch.no_grad(): ... 
logits = model(**inputs).logits >>> # retrieve index of [MASK] >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0] >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1) >>> tokenizer.decode(predicted_token_id) 'maximum' ``` ```python >>> labels = tokenizer(LONG_ARTICLE_TARGET, return_tensors="pt")["input_ids"] >>> labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100) >>> outputs = model(**inputs, labels=labels) >>> round(outputs.loss.item(), 2) 1.08 ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation(self, input_ids, attention_mask=None, **model_kwargs): input_shape = input_ids.shape effective_batch_size = input_shape[0] # add a dummy token if self.config.pad_token_id is None: raise ValueError("The PAD token should be defined for generation") attention_mask = torch.cat([attention_mask, attention_mask.new_zeros((attention_mask.shape[0], 1))], dim=-1) dummy_token = torch.full( (effective_batch_size, 1), self.config.pad_token_id, dtype=torch.long, device=input_ids.device ) input_ids = torch.cat([input_ids, dummy_token], dim=1) return {"input_ids": input_ids, "attention_mask": attention_mask} @add_start_docstrings( """BigBird Model with a `language modeling` head on top for CLM fine-tuning.""", BIG_BIRD_START_DOCSTRING ) class BigBirdForCausalLM(BigBirdPreTrainedModel): _keys_to_ignore_on_load_missing = [ r"position_ids", r"predictions.decoder.bias", "cls.predictions.decoder.weight", "cls.predictions.decoder.bias", ] def __init__(self, config): super().__init__(config) if not config.is_decoder: logger.warning("If you want to use `BigBirdForCausalLM` as a standalone, add `is_decoder=True.`") self.bert = BigBirdModel(config) self.cls = BigBirdOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, 
encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[CausalLMOutputWithCrossAttentions, Tuple[torch.FloatTensor]]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) lm_loss = None if labels is not None: # we are doing next-token prediction; shift prediction scores and input ids by one shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutputWithCrossAttentions( loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs): input_shape = input_ids.shape # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) # cut decoder_input_ids if past is used if past_key_values is not None: input_ids = input_ids[:, -1:] return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values} def _reorder_cache(self, past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += ( tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:], ) return reordered_past class BigBirdClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) self.config = config def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. to [CLS]) x = self.dropout(x) x = self.dense(x) x = ACT2FN[self.config.hidden_act](x) x = self.dropout(x) x = self.out_proj(x) return x @add_start_docstrings( """ BigBird Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", BIG_BIRD_START_DOCSTRING, ) class BigBirdForSequenceClassification(BigBirdPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.bert = BigBirdModel(config) self.classifier = BigBirdClassificationHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[SequenceClassifierOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: Example: ```python >>> import torch >>> from transformers import AutoTokenizer, BigBirdForSequenceClassification >>> from datasets import load_dataset >>> tokenizer = AutoTokenizer.from_pretrained("l-yohai/bigbird-roberta-base-mnli") >>> model = BigBirdForSequenceClassification.from_pretrained("l-yohai/bigbird-roberta-base-mnli") >>> squad_ds = load_dataset("squad_v2", split="train") # doctest: +IGNORE_RESULT >>> LONG_ARTICLE = squad_ds[81514]["context"] >>> inputs = tokenizer(LONG_ARTICLE, return_tensors="pt") >>> # long input article >>> list(inputs["input_ids"].shape) [1, 919] >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_class_id = logits.argmax().item() >>> model.config.id2label[predicted_class_id] 'LABEL_0' ``` ```python >>> num_labels = len(model.config.id2label) >>> model = BigBirdForSequenceClassification.from_pretrained( ... "l-yohai/bigbird-roberta-base-mnli", num_labels=num_labels ... 
) >>> labels = torch.tensor(1) >>> loss = model(**inputs, labels=labels).loss >>> round(loss.item(), 2) 1.13 ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ BigBird Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, BIG_BIRD_START_DOCSTRING, ) class BigBirdForMultipleChoice(BigBirdPreTrainedModel): def __init__(self, config): super().__init__(config) self.bert = BigBirdModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward( BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[MultipleChoiceModelOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. 
(See `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ BigBird Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, BIG_BIRD_START_DOCSTRING, ) class BigBirdForTokenClassification(BigBirdPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bert = BigBirdModel(config) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[TokenClassifierOutput, Tuple[torch.FloatTensor]]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class BigBirdForQuestionAnsweringHead(nn.Module): """Head for question answering tasks.""" def __init__(self, config): super().__init__() self.dropout = nn.Dropout(config.hidden_dropout_prob) self.intermediate = BigBirdIntermediate(config) self.output = BigBirdOutput(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) def forward(self, encoder_output): hidden_states = self.dropout(encoder_output) hidden_states = self.intermediate(hidden_states) hidden_states = self.output(hidden_states, encoder_output) hidden_states = self.qa_outputs(hidden_states) return hidden_states @add_start_docstrings( """ BigBird Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, BIG_BIRD_START_DOCSTRING, ) class BigBirdForQuestionAnswering(BigBirdPreTrainedModel): def __init__(self, config, add_pooling_layer=False): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.sep_token_id = config.sep_token_id self.bert = BigBirdModel(config, add_pooling_layer=add_pooling_layer) self.qa_classifier = BigBirdForQuestionAnsweringHead(config) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BIG_BIRD_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=BigBirdForQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.FloatTensor] = None, question_lengths=None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[BigBirdForQuestionAnsweringModelOutput, Tuple[torch.FloatTensor]]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. 
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. Returns: Example: ```python >>> import torch >>> from transformers import AutoTokenizer, BigBirdForQuestionAnswering >>> from datasets import load_dataset >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-roberta-base") >>> model = BigBirdForQuestionAnswering.from_pretrained("google/bigbird-roberta-base") >>> squad_ds = load_dataset("squad_v2", split="train") # doctest: +IGNORE_RESULT >>> # select random article and question >>> LONG_ARTICLE = squad_ds[81514]["context"] >>> QUESTION = squad_ds[81514]["question"] >>> QUESTION 'During daytime how high can the temperatures reach?' >>> inputs = tokenizer(QUESTION, LONG_ARTICLE, return_tensors="pt") >>> # long article and question input >>> list(inputs["input_ids"].shape) [1, 929] >>> with torch.no_grad(): ... outputs = model(**inputs) >>> answer_start_index = outputs.start_logits.argmax() >>> answer_end_index = outputs.end_logits.argmax() >>> predict_answer_token_ids = inputs.input_ids[0, answer_start_index : answer_end_index + 1] >>> predict_answer_token = tokenizer.decode(predict_answer_token_ids) ``` ```python >>> target_start_index, target_end_index = torch.tensor([130]), torch.tensor([132]) >>> outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index) >>> loss = outputs.loss ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict seqlen = input_ids.size(1) if input_ids is not None else inputs_embeds.size(1) if question_lengths is None and input_ids is not None: # assuming input_ids format: <cls> <question> <sep> context <sep> question_lengths = torch.argmax(input_ids.eq(self.sep_token_id).int(), dim=-1) + 1 question_lengths.unsqueeze_(1) logits_mask = None if question_lengths is not None: # setting lengths logits to `-inf` logits_mask = self.prepare_question_mask(question_lengths, seqlen) if token_type_ids is None: token_type_ids = torch.ones(logits_mask.size(), dtype=int, device=logits_mask.device) - logits_mask logits_mask = logits_mask logits_mask[:, 0] = False logits_mask.unsqueeze_(2) outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_classifier(sequence_output) if logits_mask is not None: # removing question tokens from the competition logits = logits - logits_mask * 1e6 start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 
if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return BigBirdForQuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, pooler_output=outputs.pooler_output, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @staticmethod def prepare_question_mask(q_lengths: torch.Tensor, maxlen: int): # q_lengths -> (bz, 1) mask = torch.arange(0, maxlen).to(q_lengths.device) mask.unsqueeze_(0) # -> (1, maxlen) mask = torch.where(mask < q_lengths, 1, 0) return mask
1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors with `//` is deprecated and has to be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors with `//` is deprecated and has to be done via `torch.div` instead.
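For context, a minimal sketch of the replacement this PR applies throughout the modeling files (illustrative only; the variable names below are hypothetical and not taken from the actual diff):

```python
# Floor division on tensors via `//` emits a deprecation warning in newer
# PyTorch versions; the recommended spelling is torch.div(..., rounding_mode="floor").
import torch

positions = torch.arange(10)
block_size = 3

# deprecated pattern that triggers the `__floordiv__` warning:
# block_ids = positions // block_size

# replacement with identical results for integer tensors
block_ids = torch.div(positions, block_size, rounding_mode="floor")
print(block_ids)  # tensor([0, 0, 0, 1, 1, 1, 2, 2, 2, 3])
```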
./src/transformers/models/bigbird_pegasus/modeling_bigbird_pegasus.py
# coding=utf-8 # Copyright 2021 Google Research The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch BigBirdPegasus model.""" import copy import math import random from typing import List, Optional, Tuple, Union import numpy as np import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_bigbird_pegasus import BigBirdPegasusConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "google/bigbird-pegasus-large-arxiv" _CONFIG_FOR_DOC = "BigBirdPegasusConfig" _EXPECTED_OUTPUT_SHAPE = [1, 7, 1024] BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST = [ "google/bigbird-pegasus-large-arxiv", "google/bigbird-pegasus-large-pubmed", "google/bigbird-pegasus-large-bigpatent", # See all BigBirdPegasus models at https://huggingface.co/models?filter=bigbird_pegasus ] def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. """ shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min)) mask_cond = torch.arange(mask.size(-1)) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min) class BigBirdPegasusLearnedPositionalEmbedding(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, num_embeddings: int, embedding_dim: int): super().__init__(num_embeddings, embedding_dim) def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0): """`input_ids_shape` is expected to be [bsz x seqlen].""" bsz, seq_len = input_ids_shape[:2] positions = torch.arange( past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device ) return super().forward(positions) # Copied from transformers.models.big_bird.modeling_big_bird.BigBirdSelfAttention with BigBird->BigBirdPegasus class BigBirdPegasusSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. 
# Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BigBirdPegasusModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.big_bird.modeling_big_bird.BigBirdBlockSparseAttention with BigBird->BigBirdPegasus class BigBirdPegasusBlockSparseAttention(nn.Module): def __init__(self, config, seed=None): super().__init__() self.max_seqlen = config.max_position_embeddings self.seed = seed if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size {config.hidden_size} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." ) self.num_attention_heads = config.num_attention_heads self.num_random_blocks = config.num_random_blocks self.block_size = config.block_size self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, band_mask=None, from_mask=None, to_mask=None, from_blocked_mask=None, to_blocked_mask=None, output_attentions=None, ): # Currently this `class` can't be used in decoder. 
batch_size, seqlen, _ = hidden_states.size() to_seq_length = from_seq_length = seqlen from_block_size = to_block_size = self.block_size if from_seq_length % from_block_size != 0: raise ValueError("Query sided sequence length must be multiple of block size") if to_seq_length % to_block_size != 0: raise ValueError("Key/Value sided sequence length must be multiple of block size") query_layer = self.transpose_for_scores(self.query(hidden_states)) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) context_layer, attention_probs = self.bigbird_block_sparse_attention( query_layer, key_layer, value_layer, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, self.num_attention_heads, self.num_random_blocks, self.attention_head_size, from_block_size, to_block_size, batch_size, from_seq_length, to_seq_length, seed=self.seed, plan_from_length=None, plan_num_rand_blocks=None, output_attentions=output_attentions, ) context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs @staticmethod def torch_bmm_nd(inp_1, inp_2, ndim=None): """Fast nd matrix multiplication""" # faster replacement of torch.einsum ("bhqk,bhkd->bhqd") return torch.bmm(inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:])).view( inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 1]) ) @staticmethod def torch_bmm_nd_transpose(inp_1, inp_2, ndim=None): """Fast nd matrix multiplication with transpose""" # faster replacement of torch.einsum (bhqd,bhkd->bhqk) return torch.bmm( inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:]).transpose(1, 2) ).view(inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 2])) def bigbird_block_sparse_attention( self, query_layer, key_layer, value_layer, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, n_heads, n_rand_blocks, attention_head_size, from_block_size, to_block_size, batch_size, from_seq_len, to_seq_len, seed, plan_from_length, plan_num_rand_blocks, output_attentions, ): # BigBirdPegasus block-sparse attention as suggested in paper # ITC: # global tokens: 2 x block_size # window tokens: 3 x block_size # random tokens: num_rand_tokens x block_size # ETC: # global tokens: extra_globals_tokens + 2 x block_size # window tokens: 3 x block_size # random tokens: num_rand_tokens x block_size # Note: # 1) Currently, ETC is not supported. # 2) Window size is fixed to 3 blocks & it can be changed only by # changing `block_size`. # 3) Number of global blocks are fixed (2 blocks here) & global tokens can be # controlled only by `block_size`. # attention is calculated separately for q[0], q[1], q[2:-2], q[-2], q[-1] in order to use special trick of shifting tokens (for calculating sliding attention) # hence following code can be divided into 5 parts. 
if from_seq_len // from_block_size != to_seq_len // to_block_size: raise ValueError("Error the number of blocks needs to be same!") rsqrt_d = 1 / math.sqrt(attention_head_size) bsz = batch_size attn_mask_penalty = -10000.0 # generate random attention and corresponding masks np.random.seed(seed) if from_seq_len in [1024, 3072, 4096]: # old plans used in paper rand_attn = [ self._bigbird_block_rand_mask( self.max_seqlen, self.max_seqlen, from_block_size, to_block_size, n_rand_blocks, last_idx=1024 )[: (from_seq_len // from_block_size - 2)] for _ in range(n_heads) ] else: if plan_from_length is None: plan_from_length, plan_num_rand_blocks = self._get_rand_attn_plan( from_seq_len, from_block_size, n_rand_blocks ) rand_attn = self._bigbird_block_rand_mask_with_head( from_seq_length=from_seq_len, to_seq_length=to_seq_len, from_block_size=from_block_size, to_block_size=to_block_size, num_heads=n_heads, plan_from_length=plan_from_length, plan_num_rand_blocks=plan_num_rand_blocks, ) rand_attn = np.stack(rand_attn, axis=0) rand_attn = torch.tensor(rand_attn, device=query_layer.device, dtype=torch.long) rand_attn.unsqueeze_(0) rand_attn = torch.cat([rand_attn for _ in range(batch_size)], dim=0) rand_mask = self._create_rand_mask_from_inputs( from_blocked_mask, to_blocked_mask, rand_attn, n_heads, n_rand_blocks, bsz, from_seq_len, from_block_size ) blocked_query_matrix = query_layer.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, -1) blocked_key_matrix = key_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1) blocked_value_matrix = value_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1) # preparing block for randn attn gathered_key = self.torch_gather_b2(blocked_key_matrix, rand_attn) gathered_key = gathered_key.view( bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1 ) # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1] gathered_value = self.torch_gather_b2(blocked_value_matrix, rand_attn) gathered_value = gathered_value.view( bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1 ) # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1] # 1st PART # 1st block (global block) attention scores # q[0] x (k[0], k[1], k[2], k[3], k[4] .... 
) # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len] first_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 0], key_layer, ndim=4) first_product = first_product * rsqrt_d first_product += (1.0 - to_mask) * attn_mask_penalty first_attn_weights = nn.functional.softmax( first_product, dim=-1 ) # [bsz, n_heads, from_block_size, to_seq_len] # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1] first_context_layer = self.torch_bmm_nd(first_attn_weights, value_layer, ndim=4) first_context_layer.unsqueeze_(2) # 2nd PART # 2nd block attention scores # q[1] x (sliding_keys, random_keys, global_keys) # sliding key blocks -> 2nd, 3rd blocks # global key blocks -> 1st block second_key_mat = torch.cat( [ blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, 1], blocked_key_matrix[:, :, 2], blocked_key_matrix[:, :, -1], gathered_key[:, :, 0], ], dim=2, ) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] second_value_mat = torch.cat( [ blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, 1], blocked_value_matrix[:, :, 2], blocked_value_matrix[:, :, -1], gathered_value[:, :, 0], ], dim=2, ) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] second_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 1], second_key_mat, ndim=4) second_seq_pad = torch.cat( [ to_mask[:, :, :, : 3 * to_block_size], to_mask[:, :, :, -to_block_size:], to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]), ], dim=3, ) second_rand_pad = torch.cat( [ rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]), rand_mask[:, :, 0], ], dim=3, ) second_product = second_product * rsqrt_d second_product += (1.0 - torch.minimum(second_seq_pad, second_rand_pad)) * attn_mask_penalty second_attn_weights = nn.functional.softmax( second_product, dim=-1 ) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1] second_context_layer = self.torch_bmm_nd(second_attn_weights, second_value_mat, ndim=4) second_context_layer.unsqueeze_(2) # 3rd PART # Middle blocks attention scores # q[-2:2] x (sliding_keys, random_keys, global_keys) # sliding attn is calculated using special trick of shifting tokens as discussed in paper # random keys are generated by taking random indices as per `rand_attn` # global keys -> 1st & last block exp_blocked_key_matrix = torch.cat( [blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2], blocked_key_matrix[:, :, 3:-1]], dim=3 ) # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] exp_blocked_value_matrix = torch.cat( [blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2], blocked_value_matrix[:, :, 3:-1]], dim=3, ) # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] middle_query_matrix = blocked_query_matrix[:, :, 2:-2] # sliding attention scores for q[-2:2] # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [b, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] inner_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, exp_blocked_key_matrix, ndim=5) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, 
from_block_size, 3*to_block_size] inner_band_product = inner_band_product * rsqrt_d # randn attention scores for q[-2:2] # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1] rand_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, gathered_key[:, :, 1:-1], ndim=5) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size] rand_band_product = rand_band_product * rsqrt_d # Including 1st block (since it's global) first_band_product = torch.einsum( "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, 0] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] first_band_product = first_band_product * rsqrt_d # Including last block (since it's global) last_band_product = torch.einsum( "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, -1] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] last_band_product = last_band_product * rsqrt_d # masking padded tokens inner_band_product += (1.0 - band_mask) * attn_mask_penalty first_band_product += (1.0 - to_mask[:, :, :, :to_block_size].unsqueeze(3)) * attn_mask_penalty last_band_product += (1.0 - to_mask[:, :, :, -to_block_size:].unsqueeze(3)) * attn_mask_penalty rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * attn_mask_penalty # completing attention scores matrix for all q[-2:2] band_product = torch.cat( [first_band_product, inner_band_product, rand_band_product, last_band_product], dim=-1 ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size] # safely doing softmax since attention matrix is completed attn_weights = nn.functional.softmax( band_product, dim=-1 ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size] # contribution of sliding keys # [bsz, n_heads, m//from_block_size-4, from_block_size, 3*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] context_layer = self.torch_bmm_nd( attn_weights[:, :, :, :, to_block_size : 4 * to_block_size], exp_blocked_value_matrix, ndim=5 ) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] # adding contribution of random keys # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1] context_layer += self.torch_bmm_nd( attn_weights[:, :, :, :, 4 * to_block_size : -to_block_size], gathered_value[:, :, 1:-1], ndim=5 ) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] # adding contribution of global keys context_layer += torch.einsum( "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, :to_block_size], blocked_value_matrix[:, :, 0] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] context_layer += torch.einsum( "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, -to_block_size:], blocked_value_matrix[:, :, -1] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> 
[bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] # 4th PART # last 2nd token attention scores # q[-2] x (sliding_keys, random_keys, global_keys) # sliding key blocks -> last 3 blocks # global key block -> 1st block # random key block -> based on indices stored in `randn_attn` second_last_key_mat = torch.cat( [ blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, -3], blocked_key_matrix[:, :, -2], blocked_key_matrix[:, :, -1], gathered_key[:, :, -1], ], dim=2, ) # [bsz, n_heads, (4+n_random_blocks)*to_block_size, -1] second_last_value_mat = torch.cat( [ blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, -3], blocked_value_matrix[:, :, -2], blocked_value_matrix[:, :, -1], gathered_value[:, :, -1], ], dim=2, ) # [bsz, n_heads, (4+r)*to_block_size, -1] # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] second_last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -2], second_last_key_mat, ndim=4) second_last_seq_pad = torch.cat( [ to_mask[:, :, :, :to_block_size], to_mask[:, :, :, -3 * to_block_size :], to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]), ], dim=3, ) second_last_rand_pad = torch.cat( [ rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]), rand_mask[:, :, -1], ], dim=3, ) second_last_product = second_last_product * rsqrt_d second_last_product += (1.0 - torch.minimum(second_last_seq_pad, second_last_rand_pad)) * attn_mask_penalty second_last_attn_weights = nn.functional.softmax( second_last_product, dim=-1 ) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1] second_last_context_layer = self.torch_bmm_nd(second_last_attn_weights, second_last_value_mat, ndim=4) second_last_context_layer.unsqueeze_(2) # 5th PART # last block (global) attention scores # q[-1] x (k[0], k[1], k[2], k[3], .... 
) # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len] last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -1], key_layer, ndim=4) last_product = last_product * rsqrt_d last_product += (1.0 - to_mask) * attn_mask_penalty last_attn_weights = nn.functional.softmax(last_product, dim=-1) # [bsz, n_heads, from_block_size, n] # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1] last_context_layer = self.torch_bmm_nd(last_attn_weights, value_layer, ndim=4) last_context_layer.unsqueeze_(2) # combining representations of all tokens context_layer = torch.cat( [first_context_layer, second_context_layer, context_layer, second_last_context_layer, last_context_layer], dim=2, ) context_layer = context_layer.view((bsz, n_heads, from_seq_len, -1)) * from_mask context_layer = torch.transpose(context_layer, 1, 2) # this is just for visualizing; forward pass doesn't depend on following code if output_attentions: # TODO(PVP): need to verify if below code is correct attention_probs = torch.zeros( bsz, n_heads, from_seq_len, to_seq_len, dtype=torch.float, device=context_layer.device ) # 1st query block # corresponding to `first_context_layer` attention_probs[:, :, :from_block_size, :] = first_attn_weights # all keys global # 2nd query block # corresponding to `second_context_layer` attention_probs[:, :, from_block_size : 2 * from_block_size, : 3 * to_block_size] = second_attn_weights[ :, :, :, : 3 * to_block_size ] # 1st three key blocks (global + sliding) attention_probs[:, :, from_block_size : 2 * from_block_size, -to_block_size:] = second_attn_weights[ :, :, :, 3 * to_block_size : 4 * to_block_size ] # last key block (global) # random keys for p1, i1, w1 in zip(range(bsz), rand_attn, second_attn_weights): # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch for p2, i2, w2 in zip(range(n_heads), i1, w1): # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, ) right_slice = w2[:, 4 * to_block_size :] attn_probs_view[p1, p2, 1, :, i2[0]] = right_slice.view( from_block_size, n_rand_blocks, to_block_size ) # Middle query blocks # corresponding to `context_layer` # sliding keys for q_idx in range(from_seq_len // from_block_size - 4): attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, )[:, :, 2:-2, :, 1:-1, :] right_slice = attn_weights[:, :, q_idx, :, to_block_size : 4 * to_block_size] attn_probs_view[:, :, q_idx, :, q_idx : q_idx + 3, :] = right_slice.view( bsz, n_heads, from_block_size, 3, to_block_size ) # inner_band_product # global keys (corresponding to 1st key block) attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, :to_block_size] = attn_weights[ :, :, :, :, :to_block_size ].view( bsz, n_heads, -1, to_block_size ) # first_band_product # global keys (corresponding to last key block) attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, -to_block_size:] = attn_weights[ :, :, :, :, -to_block_size: ].view( bsz, n_heads, -1, to_block_size ) # last_band_product # random keys for p1, i1, w1 in zip(range(bsz), rand_attn, attn_weights): # p1, i1, w1 corresponds to batch_dim i.e. 
following operation is done for each sequence in batch for p2, i2, w2 in zip(range(n_heads), i1, w1): # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads for q_idx in range(1, len(i2) - 1): attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, ) right_slice = w2[q_idx - 1, :, 4 * to_block_size : -to_block_size] attn_probs_view[p1, p2, q_idx + 1, :, i2[q_idx]] = right_slice.view( from_block_size, n_rand_blocks, to_block_size ) # Second-last query block # corresponding to `second_last_context_layer` attention_probs[:, :, -2 * from_block_size : -from_block_size, :to_block_size] = second_last_attn_weights[ :, :, :, :to_block_size ] # 1st key block (global) attention_probs[ :, :, -2 * from_block_size : -from_block_size, -3 * to_block_size : ] = second_last_attn_weights[ :, :, :, to_block_size : 4 * to_block_size ] # last three blocks (global + sliding) # random keys for p1, i1, w1 in zip(range(bsz), rand_attn, second_last_attn_weights): # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch for p2, i2, w2 in zip(range(n_heads), i1, w1): # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, ) right_slice = w2[:, 4 * to_block_size :] attn_probs_view[p1, p2, -2, :, i2[-1]] = right_slice.view( from_block_size, n_rand_blocks, to_block_size ) # last query block # corresponding to `last_context_layer` attention_probs[:, :, -from_block_size:, :] = last_attn_weights # all keys global else: attention_probs = None return context_layer, attention_probs @staticmethod def torch_gather_b2(params, indices): # this operation is equivalent to tf.gather when batch_dims=2 if params.shape[:2] != indices.shape[:2]: raise ValueError( "Make sure that the first two dimensions of params and indices are identical, but" f" they are params: {params.shape[:2]} vs. indices: {indices.shape[:2]}" ) num_indices_to_gather = indices.shape[-2] * indices.shape[-1] num_indices_to_pick_from = params.shape[2] indices_shift = ( torch.arange(indices.shape[0] * indices.shape[1] * num_indices_to_gather, device=indices.device) // num_indices_to_gather * num_indices_to_pick_from ) flattened_indices = indices.view(-1) + indices_shift flattened_params = params.reshape(-1, params.shape[-2], params.shape[-1]) out_flattened = flattened_params.index_select(0, flattened_indices) out = out_flattened.reshape(params.shape[:2] + (num_indices_to_gather,) + params.shape[3:]) return out @staticmethod def _create_rand_mask_from_inputs( from_blocked_mask, to_blocked_mask, rand_attn, num_attention_heads, num_rand_blocks, batch_size, from_seq_length, from_block_size, ): """ Create 3D attention mask from a 2D tensor mask. Args: from_blocked_mask: 2D Tensor of shape [batch_size, from_seq_length//from_block_size, from_block_size]. to_blocked_mask: int32 Tensor of shape [batch_size, to_seq_length//to_block_size, to_block_size]. rand_attn: [batch_size, num_attention_heads, from_seq_length//from_block_size-2, num_rand_blocks] num_attention_heads: int. Number of attention heads. num_rand_blocks: int. Number of random chunks per row. batch_size: int. Batch size for computation. from_seq_length: int. length of from sequence. from_block_size: int. size of block in from sequence. 
Returns: float Tensor of shape [batch_size, num_attention_heads, from_seq_length//from_block_size-2, from_block_size, num_rand_blocks*to_block_size]. """ num_windows = from_seq_length // from_block_size - 2 rand_mask = torch.stack([p1[i1.flatten()] for p1, i1 in zip(to_blocked_mask, rand_attn)]) rand_mask = rand_mask.view(batch_size, num_attention_heads, num_windows, num_rand_blocks * from_block_size) rand_mask = torch.einsum("blq,bhlk->bhlqk", from_blocked_mask[:, 1:-1], rand_mask) return rand_mask @staticmethod def _get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks): """ Gives the plan of where to put random attention. Args: from_seq_length: int. length of from sequence. from_block_size: int. size of block in from sequence. num_rand_blocks: int. Number of random chunks per row. Returns: plan_from_length: ending location of from block plan_num_rand_blocks: number of random ending location for each block """ plan_from_length = [] plan_num_rand_blocks = [] if (2 * num_rand_blocks + 5) < (from_seq_length // from_block_size): plan_from_length.append(int((2 * num_rand_blocks + 5) * from_block_size)) plan_num_rand_blocks.append(num_rand_blocks) plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(0) elif (num_rand_blocks + 5) < (from_seq_length // from_block_size): plan_from_length.append(int((num_rand_blocks + 5) * from_block_size)) plan_num_rand_blocks.append(num_rand_blocks // 2) plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(num_rand_blocks - (num_rand_blocks // 2)) else: plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(num_rand_blocks) return plan_from_length, plan_num_rand_blocks @staticmethod def _bigbird_block_rand_mask( from_seq_length, to_seq_length, from_block_size, to_block_size, num_rand_blocks, last_idx=-1 ): """ Create adjacency list of random attention. Args: from_seq_length: int. length of from sequence. to_seq_length: int. length of to sequence. from_block_size: int. size of block in from sequence. to_block_size: int. size of block in to sequence. num_rand_blocks: int. Number of random chunks per row. last_idx: if -1 then num_rand_blocks blocks chosen anywhere in to sequence, if positive then num_rand_blocks blocks chosen only up to last_idx. 
Returns: adjacency list of size from_seq_length//from_block_size-2 by num_rand_blocks """ # using this method when from_seq_length in [1024, 3072, 4096] if from_seq_length // from_block_size != to_seq_length // to_block_size: raise ValueError("Error the number of blocks needs to be same!") rand_attn = np.zeros((from_seq_length // from_block_size - 2, num_rand_blocks), dtype=np.int32) middle_seq = np.arange(1, to_seq_length // to_block_size - 1, dtype=np.int32) last = to_seq_length // to_block_size - 1 if last_idx > (2 * to_block_size): last = (last_idx // to_block_size) - 1 r = num_rand_blocks # shorthand for i in range(1, from_seq_length // from_block_size - 1): start = i - 2 end = i if i == 1: rand_attn[i - 1, :] = np.random.permutation(middle_seq[2:last])[:r] elif i == 2: rand_attn[i - 1, :] = np.random.permutation(middle_seq[3:last])[:r] elif i == from_seq_length // from_block_size - 3: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r] # Missing -3: should have been sliced till last-3 elif i == from_seq_length // from_block_size - 2: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r] # Missing -4: should have been sliced till last-4 else: if start > last: start = last rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r] elif (end + 1) == last: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r] else: rand_attn[i - 1, :] = np.random.permutation( np.concatenate((middle_seq[:start], middle_seq[end + 1 : last])) )[:r] return rand_attn def _bigbird_block_rand_mask_with_head( self, from_seq_length, to_seq_length, from_block_size, to_block_size, num_heads, plan_from_length, plan_num_rand_blocks, window_block_left=1, window_block_right=1, global_block_top=1, global_block_bottom=1, global_block_left=1, global_block_right=1, ): """ Create adjacency list of random attention. Args: from_seq_length: int. length of from sequence. to_seq_length: int. length of to sequence. from_block_size: int. size of block in from sequence. to_block_size: int. size of block in to sequence. num_heads: int. total number of heads. plan_from_length: list. plan from length where num_random_blocks are chosen from. plan_num_rand_blocks: list. number of rand blocks within the plan. window_block_left: int. number of blocks of window to left of a block. window_block_right: int. number of blocks of window to right of a block. global_block_top: int. number of blocks at the top. global_block_bottom: int. number of blocks at the bottom. global_block_left: int. Number of blocks globally used to the left. global_block_right: int. Number of blocks globally used to the right. 
Returns: adjacency list of size num_head where each element is of size from_seq_length//from_block_size-2 by num_rand_blocks """ # using this method when from_seq_length not in [1024, 3072, 4096] if from_seq_length // from_block_size != to_seq_length // to_block_size: raise ValueError("Error the number of blocks needs to be same!") if from_seq_length not in plan_from_length: raise ValueError("Error from sequence length not in plan!") # Total number of blocks in the mmask num_blocks = from_seq_length // from_block_size # Number of blocks per plan plan_block_length = np.array(plan_from_length) // from_block_size # till when to follow plan max_plan_idx = plan_from_length.index(from_seq_length) # Random Attention adjacency list rand_attn = [ np.zeros((num_blocks, np.sum(plan_num_rand_blocks[: max_plan_idx + 1])), dtype=np.int32) for i in range(num_heads) ] # We will go iteratively over the plan blocks and pick random number of # Attention blocks from the legally allowed blocks for plan_idx in range(max_plan_idx + 1): rnd_r_cnt = 0 if plan_idx > 0: # set the row for all from_blocks starting from 0 to # plan_block_length[plan_idx-1] # column indx start fromm plan_block_length[plan_idx-1] and ends at # plan_block_length[plan_idx] if plan_num_rand_blocks[plan_idx] > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx])) curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1])) for blk_rw_idx in range(global_block_top, plan_block_length[plan_idx - 1]): for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention( block_id=blk_rw_idx, to_start_block_id=plan_block_length[plan_idx - 1], to_end_block_id=plan_block_length[plan_idx], num_rand_blocks=plan_num_rand_blocks[plan_idx], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, ) for pl_id in range(plan_idx): if plan_num_rand_blocks[pl_id] == 0: continue for blk_rw_idx in range(plan_block_length[plan_idx - 1], plan_block_length[plan_idx]): rnd_r_cnt = 0 to_start_block_id = 0 if pl_id > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:pl_id])) to_start_block_id = plan_block_length[pl_id - 1] curr_r_cnt = int(np.sum(plan_num_rand_blocks[: pl_id + 1])) for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention( block_id=blk_rw_idx, to_start_block_id=to_start_block_id, to_end_block_id=plan_block_length[pl_id], num_rand_blocks=plan_num_rand_blocks[pl_id], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, ) if plan_num_rand_blocks[plan_idx] == 0: continue curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1])) from_start_block_id = global_block_top to_start_block_id = 0 if plan_idx > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx])) from_start_block_id = plan_block_length[plan_idx - 1] to_start_block_id = plan_block_length[plan_idx - 1] for blk_rw_idx in range(from_start_block_id, plan_block_length[plan_idx]): for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention( block_id=blk_rw_idx, to_start_block_id=to_start_block_id, to_end_block_id=plan_block_length[plan_idx], num_rand_blocks=plan_num_rand_blocks[plan_idx], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, ) for nh 
in range(num_heads): rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :] return rand_attn @staticmethod def _get_single_block_row_attention( block_id, to_start_block_id, to_end_block_id, num_rand_blocks, window_block_left=1, window_block_right=1, global_block_left=1, global_block_right=1, ): """ For a single row block get random row attention. Args: block_id: int. block id of row. to_start_block_id: int. random attention column start id. to_end_block_id: int. random attention column end id. num_rand_blocks: int. number of random blocks to be selected. window_block_left: int. number of blocks of window to left of a block. window_block_right: int. number of blocks of window to right of a block. global_block_left: int. Number of blocks globally used to the left. global_block_right: int. Number of blocks globally used to the right. Returns: row containing the random attention vector of size num_rand_blocks. """ # list of to_blocks from which to choose random attention to_block_list = np.arange(to_start_block_id, to_end_block_id, dtype=np.int32) # permute the blocks perm_block = np.random.permutation(to_block_list) # illegal blocks for the current block id, using window illegal_blocks = list(range(block_id - window_block_left, block_id + window_block_right + 1)) # Add blocks at the start and at the end illegal_blocks.extend(list(range(global_block_left))) illegal_blocks.extend(list(range(to_end_block_id - global_block_right, to_end_block_id))) # The second from_block cannot choose random attention on second last to_block if block_id == 1: illegal_blocks.append(to_end_block_id - 2) # The second last from_block cannot choose random attention on second to_block if block_id == to_end_block_id - 2: illegal_blocks.append(1) selected_random_blokcs = [] for i in range(to_end_block_id - to_start_block_id): if perm_block[i] not in illegal_blocks: selected_random_blokcs.append(perm_block[i]) if len(selected_random_blokcs) == num_rand_blocks: break return np.array(selected_random_blokcs, dtype=np.int32) class BigBirdPegasusEncoderAttention(nn.Module): def __init__(self, config, seed=None): super().__init__() self.config = config self.seed = seed self.attention_type = config.attention_type if self.attention_type == "original_full": self.self = BigBirdPegasusSelfAttention(config) elif self.attention_type == "block_sparse": self.self = BigBirdPegasusBlockSparseAttention(config, seed) else: raise ValueError( f"attention_type can either be original_full or block_sparse, but is {self.config.attention_type}" ) self.output = nn.Linear(config.hidden_size, config.hidden_size, bias=config.use_bias) def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = value if value == "original_full": # copy all weights to new full attention class attn_weights = BigBirdPegasusSelfAttention(self.config) else: # copy all weights to new sparse attention class attn_weights = BigBirdPegasusBlockSparseAttention(self.config, self.seed) attn_weights.query = self.self.query attn_weights.value = self.self.value attn_weights.key = self.self.key self.self = attn_weights self.attention_type = value if not self.training: self.self.eval() def forward( self, hidden_states, attention_mask=None, head_mask=None, past_key_value=None, output_attentions=False, 
band_mask=None, from_mask=None, to_mask=None, from_blocked_mask=None, to_blocked_mask=None, ): # Expand dims to enable multiplication in the self-attention module head_mask = head_mask.reshape(1, -1, 1, 1) if head_mask is not None else None if self.config.attention_type == "original_full": self_outputs = self.self( hidden_states, attention_mask, head_mask, past_key_value=past_key_value, output_attentions=output_attentions, ) else: self_outputs = self.self( hidden_states, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, output_attentions ) attention_output = self.output(self_outputs[0]) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->BigBirdPegasusDecoder class BigBirdPegasusDecoderAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, 
bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned aross GPUs when using tensor-parallelism. 
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value class BigBirdPegasusEncoderLayer(nn.Module): def __init__(self, config: BigBirdPegasusConfig, seed=None): super().__init__() self.attention_type = config.attention_type self.embed_dim = config.d_model self.self_attn = BigBirdPegasusEncoderAttention(config, seed=seed) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, band_mask=None, from_mask=None, to_mask=None, from_blocked_mask=None, to_blocked_mask=None, output_attentions: bool = False, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) self_attention_outputs = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=layer_head_mask, output_attentions=output_attentions, band_mask=band_mask, from_mask=from_mask, to_mask=to_mask, from_blocked_mask=from_blocked_mask, to_blocked_mask=to_blocked_mask, ) hidden_states = self_attention_outputs[0] hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16 and ( torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() ): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (self_attention_outputs[1],) return outputs def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = value self.self_attn.set_attention_type(value) class BigBirdPegasusDecoderLayer(nn.Module): def __init__(self, config: BigBirdPegasusConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = BigBirdPegasusDecoderAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, bias=config.use_bias, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout 
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = BigBirdPegasusDecoderAttention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, bias=config.use_bias, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) # Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer.forward def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, cross_attn_layer_head_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs # Copied from transformers.models.bart.modeling_bart.BartClassificationHead with Bart->BigBirdPegasus class BigBirdPegasusClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__( self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float, ): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states class BigBirdPegasusPreTrainedModel(PreTrainedModel): config_class = BigBirdPegasusConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["BigBirdPegasusEncoderLayer", "BigBirdPegasusDecoderLayer"] def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, 
std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (BigBirdPegasusDecoder, BigBirdPegasusEncoder)): module.gradient_checkpointing = value @property def dummy_inputs(self): pad_token = self.config.pad_token_id input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) dummy_inputs = { "attention_mask": input_ids.ne(pad_token), "input_ids": input_ids, } return dummy_inputs BIGBIRD_PEGASUS_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`BigBirdPegasusConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ BIGBIRD_PEGASUS_GENERATION_EXAMPLE = r""" Summarization example: ```python >>> from transformers import AutoTokenizer, BigBirdPegasusForConditionalGeneration >>> model = BigBirdPegasusForConditionalGeneration.from_pretrained("google/bigbird-pegasus-large-arxiv") >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv") >>> ARTICLE_TO_SUMMARIZE = ( ... "The dominant sequence transduction models are based on complex recurrent or convolutional neural " ... "networks in an encoder-decoder configuration. The best performing models also connect the encoder " ... "and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, " ... "based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. " ... "Experiments on two machine translation tasks show these models to be superior in quality " ... "while being more parallelizable and requiring significantly less time to train." ... ) >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=4096, return_tensors="pt", truncation=True) >>> # Generate Summary >>> summary_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=15) >>> tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] 'dominant sequence models are based on recurrent or convolutional neural networks .' ``` """ BIGBIRD_PEGASUS_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Provide for translation and summarization training. By default, the model will create this tensor by shifting the `input_ids` to the right, following the paper. decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`modeling_bigbird_pegasus._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. decoder_head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
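        head_mask (`torch.Tensor` of shape `(encoder_layers, encoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*):
            Mask to nullify selected heads of the cross-attention modules in the decoder. Mask values selected in
            `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.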
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ BIGBIRD_PEGASUS_STANDALONE_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`ProphetNetTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class BigBirdPegasusEncoder(BigBirdPegasusPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`BigBirdPegasusEncoderLayer`]. Args: config: BigBirdPegasusConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: BigBirdPegasusConfig, embed_tokens: Optional[nn.Embedding] = None): super().__init__(config) self.attention_type = config.attention_type self.block_size = config.block_size self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.embed_positions = BigBirdPegasusLearnedPositionalEmbedding( config.max_position_embeddings, embed_dim, ) self.layers = nn.ModuleList([BigBirdPegasusEncoderLayer(config, seed=i) for i in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(embed_dim) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) hidden_states = inputs_embeds + embed_pos hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if attention_mask is None: attention_mask = torch.ones(input_shape, device=hidden_states.device) attention_mask = attention_mask.long() # in order to use block_sparse attention, sequence_length has to be at least # bigger than all global attentions: 2 * block_size # + sliding tokens: 3 * block_size # + random tokens: 2 * num_random_blocks * block_size max_tokens_to_attend = (5 + 2 * self.config.num_random_blocks) * self.config.block_size if self.attention_type == "block_sparse" and input_shape[1] <= max_tokens_to_attend: # change attention_type from block_sparse to original_full sequence_length = input_shape[1] logger.warning( "Attention type 'block_sparse' is not possible if sequence_length: " f"{sequence_length} <= num global tokens: 2 * config.block_size " "+ min. num sliding tokens: 3 * config.block_size " "+ config.num_random_blocks * config.block_size " "+ additional buffer: config.num_random_blocks * config.block_size " f"= {max_tokens_to_attend} with config.block_size " f"= {self.config.block_size}, config.num_random_blocks " f"= {self.config.num_random_blocks}. " "Changing attention type to 'original_full'..." 
) self.set_attention_type("original_full") if self.attention_type == "block_sparse": padding_len, hidden_states, attention_mask = self._pad_to_block_size(hidden_states, attention_mask) else: padding_len = 0 # expand attention_mask if self.attention_type == "original_full": # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype) blocked_encoder_mask = band_mask = from_mask = to_mask = None elif self.attention_type == "block_sparse": blocked_encoder_mask, band_mask, from_mask, to_mask = self.create_masks_for_block_sparse_attn( attention_mask, self.block_size ) attention_mask = None else: raise ValueError( f"attention_type can either be original_full or block_sparse, but is {self.attention_type}" ) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.size()[0] != len(self.layers): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." ) for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): # skip the layer layer_outputs = (None, None) else: if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(encoder_layer), hidden_states, attention_mask, (head_mask[idx] if head_mask is not None else None), band_mask, from_mask, to_mask, blocked_encoder_mask, blocked_encoder_mask, ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), band_mask=band_mask, from_mask=from_mask, to_mask=to_mask, from_blocked_mask=blocked_encoder_mask, to_blocked_mask=blocked_encoder_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) hidden_states = self.layernorm_embedding(hidden_states) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if padding_len > 0: # unpad `sequence_output` because the calling function is expecting a length == input_ids.size(1) hidden_states = hidden_states[:, :-padding_len] if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) self.encoder_o = hidden_states return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = value for layer in self.layers: layer.set_attention_type(value) @staticmethod # Copied from transformers.models.big_bird.modeling_big_bird.BigBirdModel.create_masks_for_block_sparse_attn def create_masks_for_block_sparse_attn(attention_mask: torch.Tensor, block_size: int): batch_size, seq_length = attention_mask.size() if seq_length % 
block_size != 0: raise ValueError( f"Sequence length must be multiple of block size, but sequence length is {seq_length}, while block" f" size is {block_size}." ) def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask): """ Create 3D attention mask from a 2D tensor mask. Args: from_blocked_mask: 2D Tensor of shape [batch_size, from_seq_length//from_block_size, from_block_size]. to_blocked_mask: int32 Tensor of shape [batch_size, to_seq_length//to_block_size, to_block_size]. Returns: float Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4, from_block_size, 3*to_block_size]. """ exp_blocked_to_pad = torch.cat( [to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:, 3:-1]], dim=2 ) band_mask = torch.einsum("blq,blk->blqk", from_blocked_mask[:, 2:-2], exp_blocked_to_pad) band_mask.unsqueeze_(1) return band_mask blocked_encoder_mask = attention_mask.view(batch_size, seq_length // block_size, block_size) band_mask = create_band_mask_from_inputs(blocked_encoder_mask, blocked_encoder_mask) from_mask = attention_mask.view(batch_size, 1, seq_length, 1) to_mask = attention_mask.view(batch_size, 1, 1, seq_length) return blocked_encoder_mask, band_mask, from_mask, to_mask def _pad_to_block_size(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor): """A helper function to pad tokens and mask to work with implementation of BigBird block-sparse attention.""" # padding block_size = self.config.block_size batch_size, seq_len = hidden_states.shape[:2] padding_len = (block_size - seq_len % block_size) % block_size if padding_len > 0: logger.info( f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of " f"`config.block_size`: {block_size}" ) pad_id = self.config.pad_token_id device = hidden_states.device input_ids_padding = torch.ones((batch_size, padding_len), dtype=torch.long, device=device) * pad_id inputs_embeds_padding = self.embed_tokens(input_ids_padding) hidden_states = torch.cat([hidden_states, inputs_embeds_padding], dim=-2) attention_mask = nn.functional.pad( attention_mask, (0, padding_len), value=0 ) # no attention on the padding tokens return padding_len, hidden_states, attention_mask class BigBirdPegasusDecoder(BigBirdPegasusPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a [`BigBirdPegasusDecoderLayer`] Args: config: BigBirdPegasusConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: BigBirdPegasusConfig, embed_tokens: Optional[nn.Embedding] = None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.embed_positions = BigBirdPegasusLearnedPositionalEmbedding( config.max_position_embeddings, config.d_model, ) self.layers = nn.ModuleList([BigBirdPegasusDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length ).to(inputs_embeds.device) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( inputs_embeds.device ) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. 
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
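            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up
                decoding (see `past_key_values`).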
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale attention_mask = self._prepare_decoder_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) # embed positions positions = self.embed_positions(input_shape, past_key_values_length) positions = positions.to(inputs_embeds.device) hidden_states = inputs_embeds + positions hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: if attn_mask.size()[0] != len(self.layers): raise ValueError( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." ) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): # None for past_key_value return module(*inputs, output_attentions, use_cache) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(decoder_layer), hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, None, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=( cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None ), past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) hidden_states = self.layernorm_embedding(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @add_start_docstrings( "The bare BigBirdPegasus Model outputting raw hidden-states without any specific head on top.", BIGBIRD_PEGASUS_START_DOCSTRING, ) class BigBirdPegasusModel(BigBirdPegasusPreTrainedModel): _keys_to_ignore_on_load_missing = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] def __init__(self, config: BigBirdPegasusConfig): super().__init__(config) padding_idx, vocab_size = config.pad_token_id, config.vocab_size self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx) self.encoder = BigBirdPegasusEncoder(config, self.shared) self.decoder = BigBirdPegasusDecoder(config, self.shared) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, value): self.shared = value self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(BIGBIRD_PEGASUS_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, expected_output=_EXPECTED_OUTPUT_SHAPE, ) # Copied from transformers.models.bart.modeling_bart.BartModel.forward with Bart->BigBirdPegasus def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[List[torch.FloatTensor]] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, 
inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Seq2SeqModelOutput]: # different to other models, BigBirdPegasus automatically creates decoder_input_ids from # input_ids if no decoder_input_ids are provided if decoder_input_ids is None and decoder_inputs_embeds is None: if input_ids is None: raise ValueError( "If no `decoder_input_ids` or `decoder_inputs_embeds` are " "passed, `input_ids` cannot be `None`. Please pass either " "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`." ) decoder_input_ids = shift_tokens_right( input_ids, self.config.pad_token_id, self.config.decoder_start_token_id ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings( "The BigBirdPegasus Model with a language modeling head. 
Can be used for summarization.", BIGBIRD_PEGASUS_START_DOCSTRING, ) # Copied from transformers.models.bart.modeling_bart.BartForConditionalGeneration with Bart->BigBirdPegasus, BART->BIGBIRD_PEGASUS class BigBirdPegasusForConditionalGeneration(BigBirdPegasusPreTrainedModel): base_model_prefix = "model" _keys_to_ignore_on_load_missing = [ r"final_logits_bias", r"lm_head.weight", "encoder.embed_tokens.weight", "decoder.embed_tokens.weight", ] def __init__(self, config: BigBirdPegasusConfig): super().__init__(config) self.model = BigBirdPegasusModel(config) self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens) self._resize_final_logits_bias(new_num_tokens) return new_embeddings def _resize_final_logits_bias(self, new_num_tokens: int) -> None: old_num_tokens = self.final_logits_bias.shape[-1] if new_num_tokens <= old_num_tokens: new_bias = self.final_logits_bias[:, :new_num_tokens] else: extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) self.register_buffer("final_logits_bias", new_bias) def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(BIGBIRD_PEGASUS_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @add_end_docstrings(BIGBIRD_PEGASUS_GENERATION_EXAMPLE) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[List[torch.FloatTensor]] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Seq2SeqLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) lm_logits = self.lm_head(outputs[0]) lm_logits = lm_logits + self.final_logits_bias.to(lm_logits.device) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past_key_values is used if past_key_values is not None: decoder_input_ids = decoder_input_ids[:, -1:] return { "input_ids": None, # encoder_outputs is defined. input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, # change this to avoid caching (presumably for debugging) } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: # cached cross_attention states don't have to be reordered -> they are always the same reordered_past += ( tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:], ) return reordered_past @add_start_docstrings( """ BigBirdPegasus model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", BIGBIRD_PEGASUS_START_DOCSTRING, ) class BigBirdPegasusForSequenceClassification(BigBirdPegasusPreTrainedModel): _keys_to_ignore_on_load_missing = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] def __init__(self, config: BigBirdPegasusConfig, **kwargs): super().__init__(config, **kwargs) self.model = BigBirdPegasusModel(config) self.classification_head = BigBirdPegasusClassificationHead( config.d_model, config.d_model, config.num_labels, config.classifier_dropout, ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BIGBIRD_PEGASUS_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) # Copied from transformers.models.bart.modeling_bart.BartForSequenceClassification.forward def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Seq2SeqSequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False if input_ids is None and inputs_embeds is not None: raise NotImplementedError( f"Passing input embeddings is currently not supported for {self.__class__.__name__}" ) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] # last hidden state eos_mask = input_ids.eq(self.config.eos_token_id).to(hidden_states.device) if len(torch.unique_consecutive(eos_mask.sum(1))) > 1: raise ValueError("All examples must have the same number of <eos> tokens.") sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[ :, -1, : ] logits = self.classification_head(sentence_representation) loss = None if labels is not None: if self.config.problem_type is None: if self.config.num_labels == 1: self.config.problem_type = "regression" elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.config.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return Seq2SeqSequenceClassifierOutput( loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( """ BigBirdPegasus Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", BIGBIRD_PEGASUS_START_DOCSTRING, ) class BigBirdPegasusForQuestionAnswering(BigBirdPegasusPreTrainedModel): _keys_to_ignore_on_load_missing = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] def __init__(self, config): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.model = BigBirdPegasusModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BIGBIRD_PEGASUS_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) # Copied from transformers.models.bart.modeling_bart.BartForQuestionAnswering.forward def forward( self, input_ids: torch.Tensor = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[List[torch.FloatTensor]] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Seq2SeqQuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if start_positions is not None and end_positions is not None: use_cache = False outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = ( start_logits, end_logits, ) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return Seq2SeqQuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) # Copied from transformers.models.pegasus.modeling_pegasus.PegasusDecoderWrapper with Pegasus->BigBirdPegasus class BigBirdPegasusDecoderWrapper(BigBirdPegasusPreTrainedModel): """ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is used in combination with the [`EncoderDecoderModel`] framework. 
""" def __init__(self, config): super().__init__(config) self.decoder = BigBirdPegasusDecoder(config) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) class BigBirdPegasusForCausalLM(BigBirdPegasusPreTrainedModel): _keys_to_ignore_on_load_missing = ["lm_head.weight"] def __init__(self, config): config = copy.deepcopy(config) config.is_decoder = True config.is_encoder_decoder = False super().__init__(config) self.model = BigBirdPegasusDecoderWrapper(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. 
Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)` and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. Returns: Example: ```python >>> from transformers import AutoTokenizer, BigBirdPegasusForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv") >>> model = BigBirdPegasusForCausalLM.from_pretrained( ... "google/bigbird-pegasus-large-arxiv", add_cross_attention=False ... ) >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." 
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model.decoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = self.lm_head(outputs[0]) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs ): # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_ids.shape) if past_key_values: input_ids = input_ids[:, -1:] # first step, decoder_cached_states are empty return { "input_ids": input_ids, # encoder_outputs is defined. input_ids not needed "attention_mask": attention_mask, "past_key_values": past_key_values, "use_cache": use_cache, } @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) return reordered_past
# coding=utf-8 # Copyright 2021 Google Research The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch BigBirdPegasus model.""" import copy import math import random from typing import List, Optional, Tuple, Union import numpy as np import torch from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, Seq2SeqQuestionAnsweringModelOutput, Seq2SeqSequenceClassifierOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import torch_int_div from ...utils import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_bigbird_pegasus import BigBirdPegasusConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "google/bigbird-pegasus-large-arxiv" _CONFIG_FOR_DOC = "BigBirdPegasusConfig" _EXPECTED_OUTPUT_SHAPE = [1, 7, 1024] BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST = [ "google/bigbird-pegasus-large-arxiv", "google/bigbird-pegasus-large-pubmed", "google/bigbird-pegasus-large-bigpatent", # See all BigBirdPegasus models at https://huggingface.co/models?filter=bigbird_pegasus ] def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. """ shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id if pad_token_id is None: raise ValueError("self.model.config.pad_token_id has to be defined.") # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.tensor(torch.finfo(dtype).min)) mask_cond = torch.arange(mask.size(-1)) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min) class BigBirdPegasusLearnedPositionalEmbedding(nn.Embedding): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, num_embeddings: int, embedding_dim: int): super().__init__(num_embeddings, embedding_dim) def forward(self, input_ids_shape: torch.Size, past_key_values_length: int = 0): """`input_ids_shape` is expected to be [bsz x seqlen].""" bsz, seq_len = input_ids_shape[:2] positions = torch.arange( past_key_values_length, past_key_values_length + seq_len, dtype=torch.long, device=self.weight.device ) return super().forward(positions) # Copied from transformers.models.big_bird.modeling_big_bird.BigBirdSelfAttention with BigBird->BigBirdPegasus class BigBirdPegasusSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. 
# Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BigBirdPegasusModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.big_bird.modeling_big_bird.BigBirdBlockSparseAttention with BigBird->BigBirdPegasus class BigBirdPegasusBlockSparseAttention(nn.Module): def __init__(self, config, seed=None): super().__init__() self.max_seqlen = config.max_position_embeddings self.seed = seed if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size {config.hidden_size} is not a multiple of the number of attention " f"heads {config.num_attention_heads}." ) self.num_attention_heads = config.num_attention_heads self.num_random_blocks = config.num_random_blocks self.block_size = config.block_size self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.use_bias) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, band_mask=None, from_mask=None, to_mask=None, from_blocked_mask=None, to_blocked_mask=None, output_attentions=None, ): # Currently this `class` can't be used in decoder. 
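        # Note (added for clarity): the query/key/value projections below are the same as in full
        # self-attention, but the attention scores are computed block-wise in
        # `bigbird_block_sparse_attention`, so both the query-side and the key/value-side sequence
        # lengths must be exact multiples of `block_size` (checked right below).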
batch_size, seqlen, _ = hidden_states.size() to_seq_length = from_seq_length = seqlen from_block_size = to_block_size = self.block_size if from_seq_length % from_block_size != 0: raise ValueError("Query sided sequence length must be multiple of block size") if to_seq_length % to_block_size != 0: raise ValueError("Key/Value sided sequence length must be multiple of block size") query_layer = self.transpose_for_scores(self.query(hidden_states)) key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) context_layer, attention_probs = self.bigbird_block_sparse_attention( query_layer, key_layer, value_layer, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, self.num_attention_heads, self.num_random_blocks, self.attention_head_size, from_block_size, to_block_size, batch_size, from_seq_length, to_seq_length, seed=self.seed, plan_from_length=None, plan_num_rand_blocks=None, output_attentions=output_attentions, ) context_layer = context_layer.contiguous().view(batch_size, from_seq_length, -1) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs @staticmethod def torch_bmm_nd(inp_1, inp_2, ndim=None): """Fast nd matrix multiplication""" # faster replacement of torch.einsum ("bhqk,bhkd->bhqd") return torch.bmm(inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:])).view( inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 1]) ) @staticmethod def torch_bmm_nd_transpose(inp_1, inp_2, ndim=None): """Fast nd matrix multiplication with transpose""" # faster replacement of torch.einsum (bhqd,bhkd->bhqk) return torch.bmm( inp_1.reshape((-1,) + inp_1.shape[-2:]), inp_2.reshape((-1,) + inp_2.shape[-2:]).transpose(1, 2) ).view(inp_1.shape[: ndim - 2] + (inp_1.shape[ndim - 2], inp_2.shape[ndim - 2])) def bigbird_block_sparse_attention( self, query_layer, key_layer, value_layer, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, n_heads, n_rand_blocks, attention_head_size, from_block_size, to_block_size, batch_size, from_seq_len, to_seq_len, seed, plan_from_length, plan_num_rand_blocks, output_attentions, ): # BigBirdPegasus block-sparse attention as suggested in paper # ITC: # global tokens: 2 x block_size # window tokens: 3 x block_size # random tokens: num_rand_tokens x block_size # ETC: # global tokens: extra_globals_tokens + 2 x block_size # window tokens: 3 x block_size # random tokens: num_rand_tokens x block_size # Note: # 1) Currently, ETC is not supported. # 2) Window size is fixed to 3 blocks & it can be changed only by # changing `block_size`. # 3) Number of global blocks are fixed (2 blocks here) & global tokens can be # controlled only by `block_size`. # attention is calculated separately for q[0], q[1], q[2:-2], q[-2], q[-1] in order to use special trick of shifting tokens (for calculating sliding attention) # hence following code can be divided into 5 parts. 
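        # Worked example (comments only, illustrative numbers): with block_size = 64,
        # seq_len = 1024 and num_random_blocks = 3 there are 16 blocks per sequence.
        # Each middle query block then attends to 2 global + 3 sliding + 3 random key
        # blocks, i.e. 8 * 64 = 512 key positions instead of all 1024, which is where
        # the sparsity (and the memory saving) of BigBird comes from.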
if from_seq_len // from_block_size != to_seq_len // to_block_size: raise ValueError("Error the number of blocks needs to be same!") rsqrt_d = 1 / math.sqrt(attention_head_size) bsz = batch_size attn_mask_penalty = -10000.0 # generate random attention and corresponding masks np.random.seed(seed) if from_seq_len in [1024, 3072, 4096]: # old plans used in paper rand_attn = [ self._bigbird_block_rand_mask( self.max_seqlen, self.max_seqlen, from_block_size, to_block_size, n_rand_blocks, last_idx=1024 )[: (from_seq_len // from_block_size - 2)] for _ in range(n_heads) ] else: if plan_from_length is None: plan_from_length, plan_num_rand_blocks = self._get_rand_attn_plan( from_seq_len, from_block_size, n_rand_blocks ) rand_attn = self._bigbird_block_rand_mask_with_head( from_seq_length=from_seq_len, to_seq_length=to_seq_len, from_block_size=from_block_size, to_block_size=to_block_size, num_heads=n_heads, plan_from_length=plan_from_length, plan_num_rand_blocks=plan_num_rand_blocks, ) rand_attn = np.stack(rand_attn, axis=0) rand_attn = torch.tensor(rand_attn, device=query_layer.device, dtype=torch.long) rand_attn.unsqueeze_(0) rand_attn = torch.cat([rand_attn for _ in range(batch_size)], dim=0) rand_mask = self._create_rand_mask_from_inputs( from_blocked_mask, to_blocked_mask, rand_attn, n_heads, n_rand_blocks, bsz, from_seq_len, from_block_size ) blocked_query_matrix = query_layer.view(bsz, n_heads, from_seq_len // from_block_size, from_block_size, -1) blocked_key_matrix = key_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1) blocked_value_matrix = value_layer.view(bsz, n_heads, to_seq_len // to_block_size, to_block_size, -1) # preparing block for randn attn gathered_key = self.torch_gather_b2(blocked_key_matrix, rand_attn) gathered_key = gathered_key.view( bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1 ) # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1] gathered_value = self.torch_gather_b2(blocked_value_matrix, rand_attn) gathered_value = gathered_value.view( bsz, n_heads, to_seq_len // to_block_size - 2, n_rand_blocks * to_block_size, -1 ) # [bsz, n_heads, to_seq_len//to_block_size-2, n_rand_blocks, to_block_size, -1] # 1st PART # 1st block (global block) attention scores # q[0] x (k[0], k[1], k[2], k[3], k[4] .... 
) # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len] first_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 0], key_layer, ndim=4) first_product = first_product * rsqrt_d first_product += (1.0 - to_mask) * attn_mask_penalty first_attn_weights = nn.functional.softmax( first_product, dim=-1 ) # [bsz, n_heads, from_block_size, to_seq_len] # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1] first_context_layer = self.torch_bmm_nd(first_attn_weights, value_layer, ndim=4) first_context_layer.unsqueeze_(2) # 2nd PART # 2nd block attention scores # q[1] x (sliding_keys, random_keys, global_keys) # sliding key blocks -> 2nd, 3rd blocks # global key blocks -> 1st block second_key_mat = torch.cat( [ blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, 1], blocked_key_matrix[:, :, 2], blocked_key_matrix[:, :, -1], gathered_key[:, :, 0], ], dim=2, ) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] second_value_mat = torch.cat( [ blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, 1], blocked_value_matrix[:, :, 2], blocked_value_matrix[:, :, -1], gathered_value[:, :, 0], ], dim=2, ) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] second_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, 1], second_key_mat, ndim=4) second_seq_pad = torch.cat( [ to_mask[:, :, :, : 3 * to_block_size], to_mask[:, :, :, -to_block_size:], to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]), ], dim=3, ) second_rand_pad = torch.cat( [ rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]), rand_mask[:, :, 0], ], dim=3, ) second_product = second_product * rsqrt_d second_product += (1.0 - torch.minimum(second_seq_pad, second_rand_pad)) * attn_mask_penalty second_attn_weights = nn.functional.softmax( second_product, dim=-1 ) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1] second_context_layer = self.torch_bmm_nd(second_attn_weights, second_value_mat, ndim=4) second_context_layer.unsqueeze_(2) # 3rd PART # Middle blocks attention scores # q[-2:2] x (sliding_keys, random_keys, global_keys) # sliding attn is calculated using special trick of shifting tokens as discussed in paper # random keys are generated by taking random indices as per `rand_attn` # global keys -> 1st & last block exp_blocked_key_matrix = torch.cat( [blocked_key_matrix[:, :, 1:-3], blocked_key_matrix[:, :, 2:-2], blocked_key_matrix[:, :, 3:-1]], dim=3 ) # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] exp_blocked_value_matrix = torch.cat( [blocked_value_matrix[:, :, 1:-3], blocked_value_matrix[:, :, 2:-2], blocked_value_matrix[:, :, 3:-1]], dim=3, ) # [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] middle_query_matrix = blocked_query_matrix[:, :, 2:-2] # sliding attention scores for q[-2:2] # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [b, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] inner_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, exp_blocked_key_matrix, ndim=5) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, 
from_block_size, 3*to_block_size] inner_band_product = inner_band_product * rsqrt_d # randn attention scores for q[-2:2] # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1] rand_band_product = self.torch_bmm_nd_transpose(middle_query_matrix, gathered_key[:, :, 1:-1], ndim=5) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size] rand_band_product = rand_band_product * rsqrt_d # Including 1st block (since it's global) first_band_product = torch.einsum( "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, 0] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] first_band_product = first_band_product * rsqrt_d # Including last block (since it's global) last_band_product = torch.einsum( "bhlqd,bhkd->bhlqk", middle_query_matrix, blocked_key_matrix[:, :, -1] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] last_band_product = last_band_product * rsqrt_d # masking padded tokens inner_band_product += (1.0 - band_mask) * attn_mask_penalty first_band_product += (1.0 - to_mask[:, :, :, :to_block_size].unsqueeze(3)) * attn_mask_penalty last_band_product += (1.0 - to_mask[:, :, :, -to_block_size:].unsqueeze(3)) * attn_mask_penalty rand_band_product += (1.0 - rand_mask[:, :, 1:-1]) * attn_mask_penalty # completing attention scores matrix for all q[-2:2] band_product = torch.cat( [first_band_product, inner_band_product, rand_band_product, last_band_product], dim=-1 ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size] # safely doing softmax since attention matrix is completed attn_weights = nn.functional.softmax( band_product, dim=-1 ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, (5+n_rand_blocks)*to_block_size] # contribution of sliding keys # [bsz, n_heads, m//from_block_size-4, from_block_size, 3*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, 3*to_block_size, -1] context_layer = self.torch_bmm_nd( attn_weights[:, :, :, :, to_block_size : 4 * to_block_size], exp_blocked_value_matrix, ndim=5 ) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] # adding contribution of random keys # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, n_rand_blocks*to_block_size] x [bsz, n_heads, from_seq_len//from_block_size-4, n_rand_blocks*to_block_size, -1] context_layer += self.torch_bmm_nd( attn_weights[:, :, :, :, 4 * to_block_size : -to_block_size], gathered_value[:, :, 1:-1], ndim=5 ) # ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] # adding contribution of global keys context_layer += torch.einsum( "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, :to_block_size], blocked_value_matrix[:, :, 0] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] context_layer += torch.einsum( "bhlqk,bhkd->bhlqd", attn_weights[:, :, :, :, -to_block_size:], blocked_value_matrix[:, :, -1] ) # [bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, to_block_size] x [bsz, n_heads, to_block_size, -1] ==> 
[bsz, n_heads, from_seq_len//from_block_size-4, from_block_size, -1] # 4th PART # second-to-last block attention scores # q[-2] x (sliding_keys, random_keys, global_keys) # sliding key blocks -> last 3 blocks # global key block -> 1st block # random key block -> based on indices stored in `rand_attn` second_last_key_mat = torch.cat( [ blocked_key_matrix[:, :, 0], blocked_key_matrix[:, :, -3], blocked_key_matrix[:, :, -2], blocked_key_matrix[:, :, -1], gathered_key[:, :, -1], ], dim=2, ) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] second_last_value_mat = torch.cat( [ blocked_value_matrix[:, :, 0], blocked_value_matrix[:, :, -3], blocked_value_matrix[:, :, -2], blocked_value_matrix[:, :, -1], gathered_value[:, :, -1], ], dim=2, ) # [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] second_last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -2], second_last_key_mat, ndim=4) second_last_seq_pad = torch.cat( [ to_mask[:, :, :, :to_block_size], to_mask[:, :, :, -3 * to_block_size :], to_mask.new_ones([bsz, 1, 1, n_rand_blocks * to_block_size]), ], dim=3, ) second_last_rand_pad = torch.cat( [ rand_mask.new_ones([bsz, n_heads, from_block_size, 4 * to_block_size]), rand_mask[:, :, -1], ], dim=3, ) second_last_product = second_last_product * rsqrt_d second_last_product += (1.0 - torch.minimum(second_last_seq_pad, second_last_rand_pad)) * attn_mask_penalty second_last_attn_weights = nn.functional.softmax( second_last_product, dim=-1 ) # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] # [bsz, n_heads, from_block_size, (4+n_rand_blocks)*to_block_size] x [bsz, n_heads, (4+n_rand_blocks)*to_block_size, -1] ==> [bsz, n_heads, from_block_size, -1] second_last_context_layer = self.torch_bmm_nd(second_last_attn_weights, second_last_value_mat, ndim=4) second_last_context_layer.unsqueeze_(2) # 5th PART # last block (global) attention scores # q[-1] x (k[0], k[1], k[2], k[3], .... 
) # [bsz, n_heads, from_block_size, -1] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, to_seq_len] last_product = self.torch_bmm_nd_transpose(blocked_query_matrix[:, :, -1], key_layer, ndim=4) last_product = last_product * rsqrt_d last_product += (1.0 - to_mask) * attn_mask_penalty last_attn_weights = nn.functional.softmax(last_product, dim=-1) # [bsz, n_heads, from_block_size, n] # [bsz, n_heads, from_block_size, to_seq_len] x [bsz, n_heads, to_seq_len, -1] ==> [bsz, n_heads, from_block_size, -1] last_context_layer = self.torch_bmm_nd(last_attn_weights, value_layer, ndim=4) last_context_layer.unsqueeze_(2) # combining representations of all tokens context_layer = torch.cat( [first_context_layer, second_context_layer, context_layer, second_last_context_layer, last_context_layer], dim=2, ) context_layer = context_layer.view((bsz, n_heads, from_seq_len, -1)) * from_mask context_layer = torch.transpose(context_layer, 1, 2) # this is just for visualizing; forward pass doesn't depend on following code if output_attentions: # TODO(PVP): need to verify if below code is correct attention_probs = torch.zeros( bsz, n_heads, from_seq_len, to_seq_len, dtype=torch.float, device=context_layer.device ) # 1st query block # corresponding to `first_context_layer` attention_probs[:, :, :from_block_size, :] = first_attn_weights # all keys global # 2nd query block # corresponding to `second_context_layer` attention_probs[:, :, from_block_size : 2 * from_block_size, : 3 * to_block_size] = second_attn_weights[ :, :, :, : 3 * to_block_size ] # 1st three key blocks (global + sliding) attention_probs[:, :, from_block_size : 2 * from_block_size, -to_block_size:] = second_attn_weights[ :, :, :, 3 * to_block_size : 4 * to_block_size ] # last key block (global) # random keys for p1, i1, w1 in zip(range(bsz), rand_attn, second_attn_weights): # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch for p2, i2, w2 in zip(range(n_heads), i1, w1): # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, ) right_slice = w2[:, 4 * to_block_size :] attn_probs_view[p1, p2, 1, :, i2[0]] = right_slice.view( from_block_size, n_rand_blocks, to_block_size ) # Middle query blocks # corresponding to `context_layer` # sliding keys for q_idx in range(from_seq_len // from_block_size - 4): attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, )[:, :, 2:-2, :, 1:-1, :] right_slice = attn_weights[:, :, q_idx, :, to_block_size : 4 * to_block_size] attn_probs_view[:, :, q_idx, :, q_idx : q_idx + 3, :] = right_slice.view( bsz, n_heads, from_block_size, 3, to_block_size ) # inner_band_product # global keys (corresponding to 1st key block) attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, :to_block_size] = attn_weights[ :, :, :, :, :to_block_size ].view( bsz, n_heads, -1, to_block_size ) # first_band_product # global keys (corresponding to last key block) attention_probs[:, :, 2 * from_block_size : -2 * from_block_size, -to_block_size:] = attn_weights[ :, :, :, :, -to_block_size: ].view( bsz, n_heads, -1, to_block_size ) # last_band_product # random keys for p1, i1, w1 in zip(range(bsz), rand_attn, attn_weights): # p1, i1, w1 corresponds to batch_dim i.e. 
following operation is done for each sequence in batch for p2, i2, w2 in zip(range(n_heads), i1, w1): # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads for q_idx in range(1, len(i2) - 1): attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, ) right_slice = w2[q_idx - 1, :, 4 * to_block_size : -to_block_size] attn_probs_view[p1, p2, q_idx + 1, :, i2[q_idx]] = right_slice.view( from_block_size, n_rand_blocks, to_block_size ) # Second-last query block # corresponding to `second_last_context_layer` attention_probs[:, :, -2 * from_block_size : -from_block_size, :to_block_size] = second_last_attn_weights[ :, :, :, :to_block_size ] # 1st key block (global) attention_probs[ :, :, -2 * from_block_size : -from_block_size, -3 * to_block_size : ] = second_last_attn_weights[ :, :, :, to_block_size : 4 * to_block_size ] # last three blocks (global + sliding) # random keys for p1, i1, w1 in zip(range(bsz), rand_attn, second_last_attn_weights): # p1, i1, w1 corresponds to batch_dim i.e. following operation is done for each sequence in batch for p2, i2, w2 in zip(range(n_heads), i1, w1): # p2, i2, w2 corresponds to head_dim i.e. following operation is done for each heads attn_probs_view = attention_probs.view( bsz, n_heads, from_seq_len // from_block_size, from_block_size, to_seq_len // to_block_size, to_block_size, ) right_slice = w2[:, 4 * to_block_size :] attn_probs_view[p1, p2, -2, :, i2[-1]] = right_slice.view( from_block_size, n_rand_blocks, to_block_size ) # last query block # corresponding to `last_context_layer` attention_probs[:, :, -from_block_size:, :] = last_attn_weights # all keys global else: attention_probs = None return context_layer, attention_probs @staticmethod def torch_gather_b2(params, indices): # this operation is equivalent to tf.gather when batch_dims=2 if params.shape[:2] != indices.shape[:2]: raise ValueError( "Make sure that the first two dimensions of params and indices are identical, but" f" they are params: {params.shape[:2]} vs. indices: {indices.shape[:2]}" ) num_indices_to_gather = indices.shape[-2] * indices.shape[-1] num_indices_to_pick_from = params.shape[2] shift = torch.arange(indices.shape[0] * indices.shape[1] * num_indices_to_gather, device=indices.device) indices_shift = torch_int_div(shift, num_indices_to_gather) * num_indices_to_pick_from flattened_indices = indices.view(-1) + indices_shift flattened_params = params.reshape(-1, params.shape[-2], params.shape[-1]) out_flattened = flattened_params.index_select(0, flattened_indices) out = out_flattened.reshape(params.shape[:2] + (num_indices_to_gather,) + params.shape[3:]) return out @staticmethod def _create_rand_mask_from_inputs( from_blocked_mask, to_blocked_mask, rand_attn, num_attention_heads, num_rand_blocks, batch_size, from_seq_length, from_block_size, ): """ Create 3D attention mask from a 2D tensor mask. Args: from_blocked_mask: 2D Tensor of shape [batch_size, from_seq_length//from_block_size, from_block_size]. to_blocked_mask: int32 Tensor of shape [batch_size, to_seq_length//to_block_size, to_block_size]. rand_attn: [batch_size, num_attention_heads, from_seq_length//from_block_size-2, num_rand_blocks] num_attention_heads: int. Number of attention heads. num_rand_blocks: int. Number of random chunks per row. batch_size: int. Batch size for computation. from_seq_length: int. length of from sequence. from_block_size: int. size of block in from sequence. 
Returns: float Tensor of shape [batch_size, num_attention_heads, from_seq_length//from_block_size-2, from_block_size, num_rand_blocks*to_block_size]. """ num_windows = from_seq_length // from_block_size - 2 rand_mask = torch.stack([p1[i1.flatten()] for p1, i1 in zip(to_blocked_mask, rand_attn)]) rand_mask = rand_mask.view(batch_size, num_attention_heads, num_windows, num_rand_blocks * from_block_size) rand_mask = torch.einsum("blq,bhlk->bhlqk", from_blocked_mask[:, 1:-1], rand_mask) return rand_mask @staticmethod def _get_rand_attn_plan(from_seq_length, from_block_size, num_rand_blocks): """ Gives the plan of where to put random attention. Args: from_seq_length: int. length of from sequence. from_block_size: int. size of block in from sequence. num_rand_blocks: int. Number of random chunks per row. Returns: plan_from_length: ending location of from block plan_num_rand_blocks: number of random ending location for each block """ plan_from_length = [] plan_num_rand_blocks = [] if (2 * num_rand_blocks + 5) < (from_seq_length // from_block_size): plan_from_length.append(int((2 * num_rand_blocks + 5) * from_block_size)) plan_num_rand_blocks.append(num_rand_blocks) plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(0) elif (num_rand_blocks + 5) < (from_seq_length // from_block_size): plan_from_length.append(int((num_rand_blocks + 5) * from_block_size)) plan_num_rand_blocks.append(num_rand_blocks // 2) plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(num_rand_blocks - (num_rand_blocks // 2)) else: plan_from_length.append(from_seq_length) plan_num_rand_blocks.append(num_rand_blocks) return plan_from_length, plan_num_rand_blocks @staticmethod def _bigbird_block_rand_mask( from_seq_length, to_seq_length, from_block_size, to_block_size, num_rand_blocks, last_idx=-1 ): """ Create adjacency list of random attention. Args: from_seq_length: int. length of from sequence. to_seq_length: int. length of to sequence. from_block_size: int. size of block in from sequence. to_block_size: int. size of block in to sequence. num_rand_blocks: int. Number of random chunks per row. last_idx: if -1 then num_rand_blocks blocks chosen anywhere in to sequence, if positive then num_rand_blocks blocks chosen only up to last_idx. 
Returns: adjacency list of size from_seq_length//from_block_size-2 by num_rand_blocks """ # using this method when from_seq_length in [1024, 3072, 4096] if from_seq_length // from_block_size != to_seq_length // to_block_size: raise ValueError("Error the number of blocks needs to be same!") rand_attn = np.zeros((from_seq_length // from_block_size - 2, num_rand_blocks), dtype=np.int32) middle_seq = np.arange(1, to_seq_length // to_block_size - 1, dtype=np.int32) last = to_seq_length // to_block_size - 1 if last_idx > (2 * to_block_size): last = (last_idx // to_block_size) - 1 r = num_rand_blocks # shorthand for i in range(1, from_seq_length // from_block_size - 1): start = i - 2 end = i if i == 1: rand_attn[i - 1, :] = np.random.permutation(middle_seq[2:last])[:r] elif i == 2: rand_attn[i - 1, :] = np.random.permutation(middle_seq[3:last])[:r] elif i == from_seq_length // from_block_size - 3: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r] # Missing -3: should have been sliced till last-3 elif i == from_seq_length // from_block_size - 2: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:last])[:r] # Missing -4: should have been sliced till last-4 else: if start > last: start = last rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r] elif (end + 1) == last: rand_attn[i - 1, :] = np.random.permutation(middle_seq[:start])[:r] else: rand_attn[i - 1, :] = np.random.permutation( np.concatenate((middle_seq[:start], middle_seq[end + 1 : last])) )[:r] return rand_attn def _bigbird_block_rand_mask_with_head( self, from_seq_length, to_seq_length, from_block_size, to_block_size, num_heads, plan_from_length, plan_num_rand_blocks, window_block_left=1, window_block_right=1, global_block_top=1, global_block_bottom=1, global_block_left=1, global_block_right=1, ): """ Create adjacency list of random attention. Args: from_seq_length: int. length of from sequence. to_seq_length: int. length of to sequence. from_block_size: int. size of block in from sequence. to_block_size: int. size of block in to sequence. num_heads: int. total number of heads. plan_from_length: list. plan from length where num_random_blocks are chosen from. plan_num_rand_blocks: list. number of rand blocks within the plan. window_block_left: int. number of blocks of window to left of a block. window_block_right: int. number of blocks of window to right of a block. global_block_top: int. number of blocks at the top. global_block_bottom: int. number of blocks at the bottom. global_block_left: int. Number of blocks globally used to the left. global_block_right: int. Number of blocks globally used to the right. 
Returns: adjacency list of size num_head where each element is of size from_seq_length//from_block_size-2 by num_rand_blocks """ # using this method when from_seq_length not in [1024, 3072, 4096] if from_seq_length // from_block_size != to_seq_length // to_block_size: raise ValueError("Error the number of blocks needs to be same!") if from_seq_length not in plan_from_length: raise ValueError("Error from sequence length not in plan!") # Total number of blocks in the mmask num_blocks = from_seq_length // from_block_size # Number of blocks per plan plan_block_length = np.array(plan_from_length) // from_block_size # till when to follow plan max_plan_idx = plan_from_length.index(from_seq_length) # Random Attention adjacency list rand_attn = [ np.zeros((num_blocks, np.sum(plan_num_rand_blocks[: max_plan_idx + 1])), dtype=np.int32) for i in range(num_heads) ] # We will go iteratively over the plan blocks and pick random number of # Attention blocks from the legally allowed blocks for plan_idx in range(max_plan_idx + 1): rnd_r_cnt = 0 if plan_idx > 0: # set the row for all from_blocks starting from 0 to # plan_block_length[plan_idx-1] # column indx start fromm plan_block_length[plan_idx-1] and ends at # plan_block_length[plan_idx] if plan_num_rand_blocks[plan_idx] > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx])) curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1])) for blk_rw_idx in range(global_block_top, plan_block_length[plan_idx - 1]): for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention( block_id=blk_rw_idx, to_start_block_id=plan_block_length[plan_idx - 1], to_end_block_id=plan_block_length[plan_idx], num_rand_blocks=plan_num_rand_blocks[plan_idx], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, ) for pl_id in range(plan_idx): if plan_num_rand_blocks[pl_id] == 0: continue for blk_rw_idx in range(plan_block_length[plan_idx - 1], plan_block_length[plan_idx]): rnd_r_cnt = 0 to_start_block_id = 0 if pl_id > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:pl_id])) to_start_block_id = plan_block_length[pl_id - 1] curr_r_cnt = int(np.sum(plan_num_rand_blocks[: pl_id + 1])) for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention( block_id=blk_rw_idx, to_start_block_id=to_start_block_id, to_end_block_id=plan_block_length[pl_id], num_rand_blocks=plan_num_rand_blocks[pl_id], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, ) if plan_num_rand_blocks[plan_idx] == 0: continue curr_r_cnt = int(np.sum(plan_num_rand_blocks[: plan_idx + 1])) from_start_block_id = global_block_top to_start_block_id = 0 if plan_idx > 0: rnd_r_cnt = int(np.sum(plan_num_rand_blocks[:plan_idx])) from_start_block_id = plan_block_length[plan_idx - 1] to_start_block_id = plan_block_length[plan_idx - 1] for blk_rw_idx in range(from_start_block_id, plan_block_length[plan_idx]): for h in range(num_heads): rand_attn[h][blk_rw_idx, rnd_r_cnt:curr_r_cnt] = self._get_single_block_row_attention( block_id=blk_rw_idx, to_start_block_id=to_start_block_id, to_end_block_id=plan_block_length[plan_idx], num_rand_blocks=plan_num_rand_blocks[plan_idx], window_block_left=window_block_left, window_block_right=window_block_right, global_block_left=global_block_left, global_block_right=global_block_right, ) for nh 
in range(num_heads): rand_attn[nh] = rand_attn[nh][global_block_top : num_blocks - global_block_bottom, :] return rand_attn @staticmethod def _get_single_block_row_attention( block_id, to_start_block_id, to_end_block_id, num_rand_blocks, window_block_left=1, window_block_right=1, global_block_left=1, global_block_right=1, ): """ For a single row block get random row attention. Args: block_id: int. block id of row. to_start_block_id: int. random attention column start id. to_end_block_id: int. random attention column end id. num_rand_blocks: int. number of random blocks to be selected. window_block_left: int. number of blocks of window to left of a block. window_block_right: int. number of blocks of window to right of a block. global_block_left: int. Number of blocks globally used to the left. global_block_right: int. Number of blocks globally used to the right. Returns: row containing the random attention vector of size num_rand_blocks. """ # list of to_blocks from which to choose random attention to_block_list = np.arange(to_start_block_id, to_end_block_id, dtype=np.int32) # permute the blocks perm_block = np.random.permutation(to_block_list) # illegal blocks for the current block id, using window illegal_blocks = list(range(block_id - window_block_left, block_id + window_block_right + 1)) # Add blocks at the start and at the end illegal_blocks.extend(list(range(global_block_left))) illegal_blocks.extend(list(range(to_end_block_id - global_block_right, to_end_block_id))) # The second from_block cannot choose random attention on second last to_block if block_id == 1: illegal_blocks.append(to_end_block_id - 2) # The second last from_block cannot choose random attention on second to_block if block_id == to_end_block_id - 2: illegal_blocks.append(1) selected_random_blokcs = [] for i in range(to_end_block_id - to_start_block_id): if perm_block[i] not in illegal_blocks: selected_random_blokcs.append(perm_block[i]) if len(selected_random_blokcs) == num_rand_blocks: break return np.array(selected_random_blokcs, dtype=np.int32) class BigBirdPegasusEncoderAttention(nn.Module): def __init__(self, config, seed=None): super().__init__() self.config = config self.seed = seed self.attention_type = config.attention_type if self.attention_type == "original_full": self.self = BigBirdPegasusSelfAttention(config) elif self.attention_type == "block_sparse": self.self = BigBirdPegasusBlockSparseAttention(config, seed) else: raise ValueError( f"attention_type can either be original_full or block_sparse, but is {self.config.attention_type}" ) self.output = nn.Linear(config.hidden_size, config.hidden_size, bias=config.use_bias) def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = value if value == "original_full": # copy all weights to new full attention class attn_weights = BigBirdPegasusSelfAttention(self.config) else: # copy all weights to new sparse attention class attn_weights = BigBirdPegasusBlockSparseAttention(self.config, self.seed) attn_weights.query = self.self.query attn_weights.value = self.self.value attn_weights.key = self.self.key self.self = attn_weights self.attention_type = value if not self.training: self.self.eval() def forward( self, hidden_states, attention_mask=None, head_mask=None, past_key_value=None, output_attentions=False, 
band_mask=None, from_mask=None, to_mask=None, from_blocked_mask=None, to_blocked_mask=None, ): # Expand dims to enable multiplication in the self-attention module head_mask = head_mask.reshape(1, -1, 1, 1) if head_mask is not None else None if self.config.attention_type == "original_full": self_outputs = self.self( hidden_states, attention_mask, head_mask, past_key_value=past_key_value, output_attentions=output_attentions, ) else: self_outputs = self.self( hidden_states, band_mask, from_mask, to_mask, from_blocked_mask, to_blocked_mask, output_attentions ) attention_output = self.output(self_outputs[0]) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->BigBirdPegasusDecoder class BigBirdPegasusDecoderAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.is_decoder = is_decoder self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, _ = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj # `past_key_value[0].shape[2] == key_value_states.shape[1]` # is checking that the `sequence_length` of the `past_key_value` is the same as # the provided `key_value_states` to support prefix tuning if ( is_cross_attention and past_key_value is not None and past_key_value[0].shape[2] == key_value_states.shape[1] ): # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, 
bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (bsz, 1, tgt_len, src_len): raise ValueError( f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" ) attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if layer_head_mask is not None: if layer_head_mask.size() != (self.num_heads,): raise ValueError( f"Head mask for a single layer should be of size {(self.num_heads,)}, but is" f" {layer_head_mask.size()}" ) attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) # Use the `embed_dim` from the config (stored in the class) rather than `hidden_state` because `attn_output` can be # partitioned aross GPUs when using tensor-parallelism. 
attn_output = attn_output.reshape(bsz, tgt_len, self.embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value class BigBirdPegasusEncoderLayer(nn.Module): def __init__(self, config: BigBirdPegasusConfig, seed=None): super().__init__() self.attention_type = config.attention_type self.embed_dim = config.d_model self.self_attn = BigBirdPegasusEncoderAttention(config, seed=seed) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, band_mask=None, from_mask=None, to_mask=None, from_blocked_mask=None, to_blocked_mask=None, output_attentions: bool = False, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) self_attention_outputs = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, head_mask=layer_head_mask, output_attentions=output_attentions, band_mask=band_mask, from_mask=from_mask, to_mask=to_mask, from_blocked_mask=from_blocked_mask, to_blocked_mask=to_blocked_mask, ) hidden_states = self_attention_outputs[0] hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16 and ( torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() ): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (self_attention_outputs[1],) return outputs def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = value self.self_attn.set_attention_type(value) class BigBirdPegasusDecoderLayer(nn.Module): def __init__(self, config: BigBirdPegasusConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = BigBirdPegasusDecoderAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, bias=config.use_bias, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout 
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = BigBirdPegasusDecoderAttention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, bias=config.use_bias, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) # Copied from transformers.models.mbart.modeling_mbart.MBartDecoderLayer.forward def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, cross_attn_layer_head_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, ) -> torch.Tensor: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(batch, seq_len, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (`torch.FloatTensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)`. cross_attn_layer_head_mask (`torch.FloatTensor`): mask for cross-attention heads in a given layer of size `(decoder_attention_heads,)`. past_key_value (`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
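            use_cache (`bool`, *optional*):
                If set to `True`, the present key/value states are returned and can be used to speed up decoding
                (see `past_key_value`).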
""" residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_present_key_value = None cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=cross_attn_layer_head_mask, past_key_value=cross_attn_past_key_value, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) if use_cache: outputs += (present_key_value,) return outputs # Copied from transformers.models.bart.modeling_bart.BartClassificationHead with Bart->BigBirdPegasus class BigBirdPegasusClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__( self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float, ): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states class BigBirdPegasusPreTrainedModel(PreTrainedModel): config_class = BigBirdPegasusConfig base_model_prefix = "model" supports_gradient_checkpointing = True _no_split_modules = ["BigBirdPegasusEncoderLayer", "BigBirdPegasusDecoderLayer"] def _init_weights(self, module): std = self.config.init_std if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, 
std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (BigBirdPegasusDecoder, BigBirdPegasusEncoder)): module.gradient_checkpointing = value @property def dummy_inputs(self): pad_token = self.config.pad_token_id input_ids = torch.tensor([[0, 6, 10, 4, 2], [0, 8, 12, 2, pad_token]], device=self.device) dummy_inputs = { "attention_mask": input_ids.ne(pad_token), "input_ids": input_ids, } return dummy_inputs BIGBIRD_PEGASUS_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`BigBirdPegasusConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ BIGBIRD_PEGASUS_GENERATION_EXAMPLE = r""" Summarization example: ```python >>> from transformers import AutoTokenizer, BigBirdPegasusForConditionalGeneration >>> model = BigBirdPegasusForConditionalGeneration.from_pretrained("google/bigbird-pegasus-large-arxiv") >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv") >>> ARTICLE_TO_SUMMARIZE = ( ... "The dominant sequence transduction models are based on complex recurrent or convolutional neural " ... "networks in an encoder-decoder configuration. The best performing models also connect the encoder " ... "and decoder through an attention mechanism. We propose a new simple network architecture, the Transformer, " ... "based solely on attention mechanisms, dispensing with recurrence and convolutions entirely. " ... "Experiments on two machine translation tasks show these models to be superior in quality " ... "while being more parallelizable and requiring significantly less time to train." ... ) >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=4096, return_tensors="pt", truncation=True) >>> # Generate Summary >>> summary_ids = model.generate(inputs["input_ids"], num_beams=4, max_length=15) >>> tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] 'dominant sequence models are based on recurrent or convolutional neural networks .' ``` """ BIGBIRD_PEGASUS_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
[What are attention masks?](../glossary#attention-mask) decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Provide for translation and summarization training. By default, the model will create this tensor by shifting the `input_ids` to the right, following the paper. decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*): Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read [`modeling_bigbird_pegasus._prepare_decoder_attention_mask`] and modify to your needs. See diagram 1 in [the paper](https://arxiv.org/abs/1910.13461) for more information on the default strategy. decoder_head_mask (`torch.Tensor` of shape `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, target_sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `decoder_input_ids` you can choose to directly pass an embedded representation. If `past_key_values` is used, optionally only the last `decoder_inputs_embeds` have to be input (see `past_key_values`). This is useful if you want more control over how to convert `decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If `decoder_input_ids` and `decoder_inputs_embeds` are both unset, `decoder_inputs_embeds` takes the value of `inputs_embeds`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ BIGBIRD_PEGASUS_STANDALONE_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`ProphetNetTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ class BigBirdPegasusEncoder(BigBirdPegasusPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`BigBirdPegasusEncoderLayer`]. Args: config: BigBirdPegasusConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: BigBirdPegasusConfig, embed_tokens: Optional[nn.Embedding] = None): super().__init__(config) self.attention_type = config.attention_type self.block_size = config.block_size self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_position_embeddings self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 self.embed_tokens = nn.Embedding(config.vocab_size, embed_dim, self.padding_idx) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.embed_positions = BigBirdPegasusLearnedPositionalEmbedding( config.max_position_embeddings, embed_dim, ) self.layers = nn.ModuleList([BigBirdPegasusEncoderLayer(config, seed=i) for i in range(config.encoder_layers)]) self.layernorm_embedding = nn.LayerNorm(embed_dim) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. 
See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale embed_pos = self.embed_positions(input_shape) hidden_states = inputs_embeds + embed_pos hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) if attention_mask is None: attention_mask = torch.ones(input_shape, device=hidden_states.device) attention_mask = attention_mask.long() # in order to use block_sparse attention, sequence_length has to be at least # bigger than all global attentions: 2 * block_size # + sliding tokens: 3 * block_size # + random tokens: 2 * num_random_blocks * block_size max_tokens_to_attend = (5 + 2 * self.config.num_random_blocks) * self.config.block_size if self.attention_type == "block_sparse" and input_shape[1] <= max_tokens_to_attend: # change attention_type from block_sparse to original_full sequence_length = input_shape[1] logger.warning( "Attention type 'block_sparse' is not possible if sequence_length: " f"{sequence_length} <= num global tokens: 2 * config.block_size " "+ min. num sliding tokens: 3 * config.block_size " "+ config.num_random_blocks * config.block_size " "+ additional buffer: config.num_random_blocks * config.block_size " f"= {max_tokens_to_attend} with config.block_size " f"= {self.config.block_size}, config.num_random_blocks " f"= {self.config.num_random_blocks}. " "Changing attention type to 'original_full'..." 
) self.set_attention_type("original_full") if self.attention_type == "block_sparse": padding_len, hidden_states, attention_mask = self._pad_to_block_size(hidden_states, attention_mask) else: padding_len = 0 # expand attention_mask if self.attention_type == "original_full": # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype) blocked_encoder_mask = band_mask = from_mask = to_mask = None elif self.attention_type == "block_sparse": blocked_encoder_mask, band_mask, from_mask, to_mask = self.create_masks_for_block_sparse_attn( attention_mask, self.block_size ) attention_mask = None else: raise ValueError( f"attention_type can either be original_full or block_sparse, but is {self.attention_type}" ) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.size()[0] != len(self.layers): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." ) for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): # skip the layer layer_outputs = (None, None) else: if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(encoder_layer), hidden_states, attention_mask, (head_mask[idx] if head_mask is not None else None), band_mask, from_mask, to_mask, blocked_encoder_mask, blocked_encoder_mask, ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), band_mask=band_mask, from_mask=from_mask, to_mask=to_mask, from_blocked_mask=blocked_encoder_mask, to_blocked_mask=blocked_encoder_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) hidden_states = self.layernorm_embedding(hidden_states) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if padding_len > 0: # unpad `sequence_output` because the calling function is expecting a length == input_ids.size(1) hidden_states = hidden_states[:, :-padding_len] if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) self.encoder_o = hidden_states return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) def set_attention_type(self, value: str): if value not in ["original_full", "block_sparse"]: raise ValueError( f"attention_type can only be set to either 'original_full' or 'block_sparse', but is {value}" ) # attention type is already correctly set if value == self.attention_type: return self.attention_type = value for layer in self.layers: layer.set_attention_type(value) @staticmethod # Copied from transformers.models.big_bird.modeling_big_bird.BigBirdModel.create_masks_for_block_sparse_attn def create_masks_for_block_sparse_attn(attention_mask: torch.Tensor, block_size: int): batch_size, seq_length = attention_mask.size() if seq_length % 
block_size != 0: raise ValueError( f"Sequence length must be multiple of block size, but sequence length is {seq_length}, while block" f" size is {block_size}." ) def create_band_mask_from_inputs(from_blocked_mask, to_blocked_mask): """ Create 3D attention mask from a 2D tensor mask. Args: from_blocked_mask: 2D Tensor of shape [batch_size, from_seq_length//from_block_size, from_block_size]. to_blocked_mask: int32 Tensor of shape [batch_size, to_seq_length//to_block_size, to_block_size]. Returns: float Tensor of shape [batch_size, 1, from_seq_length//from_block_size-4, from_block_size, 3*to_block_size]. """ exp_blocked_to_pad = torch.cat( [to_blocked_mask[:, 1:-3], to_blocked_mask[:, 2:-2], to_blocked_mask[:, 3:-1]], dim=2 ) band_mask = torch.einsum("blq,blk->blqk", from_blocked_mask[:, 2:-2], exp_blocked_to_pad) band_mask.unsqueeze_(1) return band_mask blocked_encoder_mask = attention_mask.view(batch_size, seq_length // block_size, block_size) band_mask = create_band_mask_from_inputs(blocked_encoder_mask, blocked_encoder_mask) from_mask = attention_mask.view(batch_size, 1, seq_length, 1) to_mask = attention_mask.view(batch_size, 1, 1, seq_length) return blocked_encoder_mask, band_mask, from_mask, to_mask def _pad_to_block_size(self, hidden_states: torch.Tensor, attention_mask: torch.Tensor): """A helper function to pad tokens and mask to work with implementation of BigBird block-sparse attention.""" # padding block_size = self.config.block_size batch_size, seq_len = hidden_states.shape[:2] padding_len = (block_size - seq_len % block_size) % block_size if padding_len > 0: logger.info( f"Input ids are automatically padded from {seq_len} to {seq_len + padding_len} to be a multiple of " f"`config.block_size`: {block_size}" ) pad_id = self.config.pad_token_id device = hidden_states.device input_ids_padding = torch.ones((batch_size, padding_len), dtype=torch.long, device=device) * pad_id inputs_embeds_padding = self.embed_tokens(input_ids_padding) hidden_states = torch.cat([hidden_states, inputs_embeds_padding], dim=-2) attention_mask = nn.functional.pad( attention_mask, (0, padding_len), value=0 ) # no attention on the padding tokens return padding_len, hidden_states, attention_mask class BigBirdPegasusDecoder(BigBirdPegasusPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a [`BigBirdPegasusDecoderLayer`] Args: config: BigBirdPegasusConfig embed_tokens (nn.Embedding): output embedding """ def __init__(self, config: BigBirdPegasusConfig, embed_tokens: Optional[nn.Embedding] = None): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_position_embeddings self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx) if embed_tokens is not None: self.embed_tokens.weight = embed_tokens.weight self.embed_positions = BigBirdPegasusLearnedPositionalEmbedding( config.max_position_embeddings, config.d_model, ) self.layers = nn.ModuleList([BigBirdPegasusDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layernorm_embedding = nn.LayerNorm(config.d_model) self.gradient_checkpointing = False # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length ).to(inputs_embeds.device) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( inputs_embeds.device ) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.Tensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. 
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules in decoder to avoid performing cross-attention on hidden heads. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
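            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up
                decoding (see `past_key_values`).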
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale attention_mask = self._prepare_decoder_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) # embed positions positions = self.embed_positions(input_shape, past_key_values_length) positions = positions.to(inputs_embeds.device) hidden_states = inputs_embeds + positions hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None # check if head_mask/cross_attn_head_mask has a correct number of layers specified if desired for attn_mask, mask_name in zip([head_mask, cross_attn_head_mask], ["head_mask", "cross_attn_head_mask"]): if attn_mask is not None: if attn_mask.size()[0] != len(self.layers): raise ValueError( f"The `{mask_name}` should be specified for {len(self.layers)} layers, but it is for" f" {head_mask.size()[0]}." ) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): continue past_key_value = past_key_values[idx] if past_key_values is not None else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): # None for past_key_value return module(*inputs, output_attentions, use_cache) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(decoder_layer), hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None, None, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), cross_attn_layer_head_mask=( cross_attn_head_mask[idx] if cross_attn_head_mask is not None else None ), past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) hidden_states = self.layernorm_embedding(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @add_start_docstrings( "The bare BigBirdPegasus Model outputting raw hidden-states without any specific head on top.", BIGBIRD_PEGASUS_START_DOCSTRING, ) class BigBirdPegasusModel(BigBirdPegasusPreTrainedModel): _keys_to_ignore_on_load_missing = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] def __init__(self, config: BigBirdPegasusConfig): super().__init__(config) padding_idx, vocab_size = config.pad_token_id, config.vocab_size self.shared = nn.Embedding(vocab_size, config.d_model, padding_idx) self.encoder = BigBirdPegasusEncoder(config, self.shared) self.decoder = BigBirdPegasusDecoder(config, self.shared) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.shared def set_input_embeddings(self, value): self.shared = value self.encoder.embed_tokens = self.shared self.decoder.embed_tokens = self.shared def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(BIGBIRD_PEGASUS_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, expected_output=_EXPECTED_OUTPUT_SHAPE, ) # Copied from transformers.models.bart.modeling_bart.BartModel.forward with Bart->BigBirdPegasus def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[List[torch.FloatTensor]] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, 
inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Seq2SeqModelOutput]: # different to other models, BigBirdPegasus automatically creates decoder_input_ids from # input_ids if no decoder_input_ids are provided if decoder_input_ids is None and decoder_inputs_embeds is None: if input_ids is None: raise ValueError( "If no `decoder_input_ids` or `decoder_inputs_embeds` are " "passed, `input_ids` cannot be `None`. Please pass either " "`input_ids` or `decoder_input_ids` or `decoder_inputs_embeds`." ) decoder_input_ids = shift_tokens_right( input_ids, self.config.pad_token_id, self.config.decoder_start_token_id ) output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder( input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings( "The BigBirdPegasus Model with a language modeling head. 
Can be used for summarization.", BIGBIRD_PEGASUS_START_DOCSTRING, ) # Copied from transformers.models.bart.modeling_bart.BartForConditionalGeneration with Bart->BigBirdPegasus, BART->BIGBIRD_PEGASUS class BigBirdPegasusForConditionalGeneration(BigBirdPegasusPreTrainedModel): base_model_prefix = "model" _keys_to_ignore_on_load_missing = [ r"final_logits_bias", r"lm_head.weight", "encoder.embed_tokens.weight", "decoder.embed_tokens.weight", ] def __init__(self, config: BigBirdPegasusConfig): super().__init__(config) self.model = BigBirdPegasusModel(config) self.register_buffer("final_logits_bias", torch.zeros((1, self.model.shared.num_embeddings))) self.lm_head = nn.Linear(config.d_model, self.model.shared.num_embeddings, bias=False) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens) self._resize_final_logits_bias(new_num_tokens) return new_embeddings def _resize_final_logits_bias(self, new_num_tokens: int) -> None: old_num_tokens = self.final_logits_bias.shape[-1] if new_num_tokens <= old_num_tokens: new_bias = self.final_logits_bias[:, :new_num_tokens] else: extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device) new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1) self.register_buffer("final_logits_bias", new_bias) def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(BIGBIRD_PEGASUS_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) @add_end_docstrings(BIGBIRD_PEGASUS_GENERATION_EXAMPLE) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[List[torch.FloatTensor]] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Seq2SeqLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. 
Returns: """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: if use_cache: logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.") use_cache = False if decoder_input_ids is None and decoder_inputs_embeds is None: decoder_input_ids = shift_tokens_right( labels, self.config.pad_token_id, self.config.decoder_start_token_id ) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, encoder_outputs=encoder_outputs, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) lm_logits = self.lm_head(outputs[0]) lm_logits = lm_logits + self.final_logits_bias.to(lm_logits.device) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return Seq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) def prepare_inputs_for_generation( self, decoder_input_ids, past_key_values=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, use_cache=None, encoder_outputs=None, **kwargs, ): # cut decoder_input_ids if past_key_values is used if past_key_values is not None: decoder_input_ids = decoder_input_ids[:, -1:] return { "input_ids": None, # encoder_outputs is defined. input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, "use_cache": use_cache, # change this to avoid caching (presumably for debugging) } def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor): return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id) @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: # cached cross_attention states don't have to be reordered -> they are always the same reordered_past += ( tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:], ) return reordered_past @add_start_docstrings( """ BigBirdPegasus model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", BIGBIRD_PEGASUS_START_DOCSTRING, ) class BigBirdPegasusForSequenceClassification(BigBirdPegasusPreTrainedModel): _keys_to_ignore_on_load_missing = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] def __init__(self, config: BigBirdPegasusConfig, **kwargs): super().__init__(config, **kwargs) self.model = BigBirdPegasusModel(config) self.classification_head = BigBirdPegasusClassificationHead( config.d_model, config.d_model, config.num_labels, config.classifier_dropout, ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BIGBIRD_PEGASUS_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) # Copied from transformers.models.bart.modeling_bart.BartForSequenceClassification.forward def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Seq2SeqSequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False if input_ids is None and inputs_embeds is not None: raise NotImplementedError( f"Passing input embeddings is currently not supported for {self.__class__.__name__}" ) outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] # last hidden state eos_mask = input_ids.eq(self.config.eos_token_id).to(hidden_states.device) if len(torch.unique_consecutive(eos_mask.sum(1))) > 1: raise ValueError("All examples must have the same number of <eos> tokens.") sentence_representation = hidden_states[eos_mask, :].view(hidden_states.size(0), -1, hidden_states.size(-1))[ :, -1, : ] logits = self.classification_head(sentence_representation) loss = None if labels is not None: if self.config.problem_type is None: if self.config.num_labels == 1: self.config.problem_type = "regression" elif self.config.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.config.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return Seq2SeqSequenceClassifierOutput( loss=loss, logits=logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( """ BigBirdPegasus Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", BIGBIRD_PEGASUS_START_DOCSTRING, ) class BigBirdPegasusForQuestionAnswering(BigBirdPegasusPreTrainedModel): _keys_to_ignore_on_load_missing = ["encoder.embed_tokens.weight", "decoder.embed_tokens.weight"] def __init__(self, config): super().__init__(config) config.num_labels = 2 self.num_labels = config.num_labels self.model = BigBirdPegasusModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(BIGBIRD_PEGASUS_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Seq2SeqQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) # Copied from transformers.models.bart.modeling_bart.BartForQuestionAnswering.forward def forward( self, input_ids: torch.Tensor = None, attention_mask: Optional[torch.Tensor] = None, decoder_input_ids: Optional[torch.LongTensor] = None, decoder_attention_mask: Optional[torch.LongTensor] = None, head_mask: Optional[torch.Tensor] = None, decoder_head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, encoder_outputs: Optional[List[torch.FloatTensor]] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, decoder_inputs_embeds: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Seq2SeqQuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (*sequence_length*). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if start_positions is not None and end_positions is not None: use_cache = False outputs = self.model( input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, cross_attn_head_mask=cross_attn_head_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = ( start_logits, end_logits, ) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return Seq2SeqQuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, past_key_values=outputs.past_key_values, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) # Copied from transformers.models.pegasus.modeling_pegasus.PegasusDecoderWrapper with Pegasus->BigBirdPegasus class BigBirdPegasusDecoderWrapper(BigBirdPegasusPreTrainedModel): """ This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is used in combination with the [`EncoderDecoderModel`] framework. 
""" def __init__(self, config): super().__init__(config) self.decoder = BigBirdPegasusDecoder(config) def forward(self, *args, **kwargs): return self.decoder(*args, **kwargs) class BigBirdPegasusForCausalLM(BigBirdPegasusPreTrainedModel): _keys_to_ignore_on_load_missing = ["lm_head.weight"] def __init__(self, config): config = copy.deepcopy(config) config.is_decoder = True config.is_encoder_decoder = False super().__init__(config) self.model = BigBirdPegasusDecoderWrapper(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.model.decoder.embed_tokens def set_input_embeddings(self, value): self.model.decoder.embed_tokens = value def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings def set_decoder(self, decoder): self.model.decoder = decoder def get_decoder(self): return self.model.decoder @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.Tensor] = None, cross_attn_head_mask: Optional[torch.Tensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. cross_attn_head_mask (`torch.Tensor` of shape `(decoder_layers, decoder_attention_heads)`, *optional*): Mask to nullify selected heads of the cross-attention modules. 
Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. The two additional tensors are only required when the model is used as a decoder in a Sequence to Sequence model. Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. Returns: Example: ```python >>> from transformers import AutoTokenizer, BigBirdPegasusForCausalLM >>> tokenizer = AutoTokenizer.from_pretrained("google/bigbird-pegasus-large-arxiv") >>> model = BigBirdPegasusForCausalLM.from_pretrained( ... "google/bigbird-pegasus-large-arxiv", add_cross_attention=False ... ) >>> assert model.config.is_decoder, f"{model.__class__} has to be configured as a decoder." 
>>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model.decoder( input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, cross_attn_head_mask=cross_attn_head_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) logits = self.lm_head(outputs[0]) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, use_cache=None, **kwargs ): # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_ids.shape) if past_key_values: input_ids = input_ids[:, -1:] # first step, decoder_cached_states are empty return { "input_ids": input_ids, # encoder_outputs is defined. input_ids not needed "attention_mask": attention_mask, "past_key_values": past_key_values, "use_cache": use_cache, } @staticmethod def _reorder_cache(past_key_values, beam_idx): reordered_past = () for layer_past in past_key_values: reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) return reordered_past
1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/conditional_detr/image_processing_conditional_detr.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Conditional DETR.""" import io import pathlib import warnings from collections import defaultdict from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union import numpy as np from transformers.feature_extraction_utils import BatchFeature from transformers.image_processing_utils import BaseImageProcessor, get_size_dict from transformers.image_transforms import ( PaddingMode, center_to_corners_format, corners_to_center_format, id_to_rgb, normalize, pad, rescale, resize, rgb_to_id, to_channel_dimension_format, ) from transformers.image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_coco_detection_annotations, valid_coco_panoptic_annotations, valid_images, ) from transformers.utils import ( is_flax_available, is_jax_tensor, is_scipy_available, is_tf_available, is_tf_tensor, is_torch_available, is_torch_tensor, is_vision_available, ) from transformers.utils.generic import ExplicitEnum, TensorType if is_torch_available(): import torch from torch import nn if is_vision_available(): import PIL if is_scipy_available(): import scipy.special import scipy.stats AnnotationType = Dict[str, Union[int, str, List[Dict]]] class AnnotionFormat(ExplicitEnum): COCO_DETECTION = "coco_detection" COCO_PANOPTIC = "coco_panoptic" SUPPORTED_ANNOTATION_FORMATS = (AnnotionFormat.COCO_DETECTION, AnnotionFormat.COCO_PANOPTIC) # Copied from transformers.models.detr.image_processing_detr.get_size_with_aspect_ratio def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]: """ Computes the output image size given the input image size and the desired output size. Args: image_size (`Tuple[int, int]`): The input image size. size (`int`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. """ height, width = image_size if max_size is not None: min_original_size = float(min((height, width))) max_original_size = float(max((height, width))) if max_original_size / min_original_size * size > max_size: size = int(round(max_size * min_original_size / max_original_size)) if (height <= width and height == size) or (width <= height and width == size): return height, width if width < height: ow = size oh = int(size * height / width) else: oh = size ow = int(size * width / height) return (oh, ow) # Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size def get_resize_output_image_size( input_image: np.ndarray, size: Union[int, Tuple[int, int], List[int]], max_size: Optional[int] = None ) -> Tuple[int, int]: """ Computes the output image size given the input image size and the desired output size. If the desired output size is a tuple or list, the output image size is returned as is. 
If the desired output size is an integer, the output image size is computed by keeping the aspect ratio of the input image size. Args: image_size (`Tuple[int, int]`): The input image size. size (`int`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. """ image_size = get_image_size(input_image) if isinstance(size, (list, tuple)): return size return get_size_with_aspect_ratio(image_size, size, max_size) # Copied from transformers.models.detr.image_processing_detr.get_numpy_to_framework_fn def get_numpy_to_framework_fn(arr) -> Callable: """ Returns a function that converts a numpy array to the framework of the input array. Args: arr (`np.ndarray`): The array to convert. """ if isinstance(arr, np.ndarray): return np.array if is_tf_available() and is_tf_tensor(arr): import tensorflow as tf return tf.convert_to_tensor if is_torch_available() and is_torch_tensor(arr): import torch return torch.tensor if is_flax_available() and is_jax_tensor(arr): import jax.numpy as jnp return jnp.array raise ValueError(f"Cannot convert arrays of type {type(arr)}") # Copied from transformers.models.detr.image_processing_detr.safe_squeeze def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray: """ Squeezes an array, but only if the axis specified has dim 1. """ if axis is None: return arr.squeeze() try: return arr.squeeze(axis=axis) except ValueError: return arr # Copied from transformers.models.detr.image_processing_detr.normalize_annotation def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict: image_height, image_width = image_size norm_annotation = {} for key, value in annotation.items(): if key == "boxes": boxes = value boxes = corners_to_center_format(boxes) boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32) norm_annotation[key] = boxes else: norm_annotation[key] = value return norm_annotation # Copied from transformers.models.detr.image_processing_detr.max_across_indices def max_across_indices(values: Iterable[Any]) -> List[Any]: """ Return the maximum value across all indices of an iterable of values. """ return [max(values_i) for values_i in zip(*values)] # Copied from transformers.models.detr.image_processing_detr.get_max_height_width def get_max_height_width(images: List[np.ndarray]) -> List[int]: """ Get the maximum height and width across all images in a batch. """ input_channel_dimension = infer_channel_dimension_format(images[0]) if input_channel_dimension == ChannelDimension.FIRST: _, max_height, max_width = max_across_indices([img.shape for img in images]) elif input_channel_dimension == ChannelDimension.LAST: max_height, max_width, _ = max_across_indices([img.shape for img in images]) else: raise ValueError(f"Invalid channel dimension format: {input_channel_dimension}") return (max_height, max_width) # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int]) -> np.ndarray: """ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. Args: image (`np.ndarray`): Image to make the pixel mask for. output_size (`Tuple[int, int]`): Output size of the mask. 
""" input_height, input_width = get_image_size(image) mask = np.zeros(output_size, dtype=np.int64) mask[:input_height, :input_width] = 1 return mask # Copied from transformers.models.detr.image_processing_detr.convert_coco_poly_to_mask def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray: """ Convert a COCO polygon annotation to a mask. Args: segmentations (`List[List[float]]`): List of polygons, each polygon represented by a list of x-y coordinates. height (`int`): Height of the mask. width (`int`): Width of the mask. """ try: from pycocotools import mask as coco_mask except ImportError: raise ImportError("Pycocotools is not installed in your environment.") masks = [] for polygons in segmentations: rles = coco_mask.frPyObjects(polygons, height, width) mask = coco_mask.decode(rles) if len(mask.shape) < 3: mask = mask[..., None] mask = np.asarray(mask, dtype=np.uint8) mask = np.any(mask, axis=2) masks.append(mask) if masks: masks = np.stack(masks, axis=0) else: masks = np.zeros((0, height, width), dtype=np.uint8) return masks # Copied from transformers.models.detr.image_processing_detr.prepare_coco_detection_annotation with DETR->ConditionalDetr def prepare_coco_detection_annotation(image, target, return_segmentation_masks: bool = False): """ Convert the target in COCO format into the format expected by ConditionalDetr. """ image_height, image_width = get_image_size(image) image_id = target["image_id"] image_id = np.asarray([image_id], dtype=np.int64) # Get all COCO annotations for the given image. annotations = target["annotations"] annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0] classes = [obj["category_id"] for obj in annotations] classes = np.asarray(classes, dtype=np.int64) # for conversion to coco api area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32) iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64) boxes = [obj["bbox"] for obj in annotations] # guard against no boxes via resizing boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4) boxes[:, 2:] += boxes[:, :2] boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width) boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height) keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) new_target = {} new_target["image_id"] = image_id new_target["class_labels"] = classes[keep] new_target["boxes"] = boxes[keep] new_target["area"] = area[keep] new_target["iscrowd"] = iscrowd[keep] new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64) if annotations and "keypoints" in annotations[0]: keypoints = [obj["keypoints"] for obj in annotations] keypoints = np.asarray(keypoints, dtype=np.float32) num_keypoints = keypoints.shape[0] keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints new_target["keypoints"] = keypoints[keep] if return_segmentation_masks: segmentation_masks = [obj["segmentation"] for obj in annotations] masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width) new_target["masks"] = masks[keep] return new_target # Copied from transformers.models.detr.image_processing_detr.masks_to_boxes def masks_to_boxes(masks: np.ndarray) -> np.ndarray: """ Compute the bounding boxes around the provided panoptic segmentation masks. 
Args: masks: masks in format `[number_masks, height, width]` where N is the number of masks Returns: boxes: bounding boxes in format `[number_masks, 4]` in xyxy format """ if masks.size == 0: return np.zeros((0, 4)) h, w = masks.shape[-2:] y = np.arange(0, h, dtype=np.float32) x = np.arange(0, w, dtype=np.float32) # see https://github.com/pytorch/pytorch/issues/50276 y, x = np.meshgrid(y, x, indexing="ij") x_mask = masks * np.expand_dims(x, axis=0) x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) x_min = x.filled(fill_value=1e8) x_min = x_min.reshape(x_min.shape[0], -1).min(-1) y_mask = masks * np.expand_dims(y, axis=0) y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) y_min = y.filled(fill_value=1e8) y_min = y_min.reshape(y_min.shape[0], -1).min(-1) return np.stack([x_min, y_min, x_max, y_max], 1) # Copied from transformers.models.detr.image_processing_detr.prepare_coco_panoptic_annotation with DETR->ConditionalDetr def prepare_coco_panoptic_annotation( image: np.ndarray, target: Dict, masks_path: Union[str, pathlib.Path], return_masks: bool = True ) -> Dict: """ Prepare a coco panoptic annotation for ConditionalDetr. """ image_height, image_width = get_image_size(image) annotation_path = pathlib.Path(masks_path) / target["file_name"] new_target = {} new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64) new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64) new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64) if "segments_info" in target: masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32) masks = rgb_to_id(masks) ids = np.array([segment_info["id"] for segment_info in target["segments_info"]]) masks = masks == ids[:, None, None] masks = masks.astype(np.uint8) if return_masks: new_target["masks"] = masks new_target["boxes"] = masks_to_boxes(masks) new_target["class_labels"] = np.array( [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64 ) new_target["iscrowd"] = np.asarray( [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64 ) new_target["area"] = np.asarray( [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32 ) return new_target # Copied from transformers.models.detr.image_processing_detr.get_segmentation_image def get_segmentation_image( masks: np.ndarray, input_size: Tuple, target_size: Tuple, stuff_equiv_classes, deduplicate=False ): h, w = input_size final_h, final_w = target_size m_id = scipy.special.softmax(masks.transpose(0, 1), -1) if m_id.shape[-1] == 0: # We didn't detect any mask :( m_id = np.zeros((h, w), dtype=np.int64) else: m_id = m_id.argmax(-1).reshape(h, w) if deduplicate: # Merge the masks corresponding to the same stuff class for equiv in stuff_equiv_classes.values(): for eq_id in equiv: m_id[m_id == eq_id] = equiv[0] seg_img = id_to_rgb(m_id) seg_img = resize(seg_img, (final_w, final_h), resample=PILImageResampling.NEAREST) return seg_img # Copied from transformers.models.detr.image_processing_detr.get_mask_area def get_mask_area(seg_img: np.ndarray, target_size: Tuple[int, int], n_classes: int) -> np.ndarray: final_h, final_w = target_size np_seg_img = seg_img.astype(np.uint8) np_seg_img = np_seg_img.reshape(final_h, final_w, 3) m_id = rgb_to_id(np_seg_img) area = [(m_id == i).sum() for i in range(n_classes)] 
return area # Copied from transformers.models.detr.image_processing_detr.score_labels_from_class_probabilities def score_labels_from_class_probabilities(logits: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: probs = scipy.special.softmax(logits, axis=-1) labels = probs.argmax(-1, keepdims=True) scores = np.take_along_axis(probs, labels, axis=-1) scores, labels = scores.squeeze(-1), labels.squeeze(-1) return scores, labels # Copied from transformers.models.detr.image_processing_detr.post_process_panoptic_sample with DetrForSegmentation->ConditionalDetrForSegmentation def post_process_panoptic_sample( out_logits: np.ndarray, masks: np.ndarray, boxes: np.ndarray, processed_size: Tuple[int, int], target_size: Tuple[int, int], is_thing_map: Dict, threshold=0.85, ) -> Dict: """ Converts the output of [`ConditionalDetrForSegmentation`] into panoptic segmentation predictions for a single sample. Args: out_logits (`torch.Tensor`): The logits for this sample. masks (`torch.Tensor`): The predicted segmentation masks for this sample. boxes (`torch.Tensor`): The prediced bounding boxes for this sample. The boxes are in the normalized format `(center_x, center_y, width, height)` and values between `[0, 1]`, relative to the size the image (disregarding padding). processed_size (`Tuple[int, int]`): The processed size of the image `(height, width)`, as returned by the preprocessing step i.e. the size after data augmentation but before batching. target_size (`Tuple[int, int]`): The target size of the image, `(height, width)` corresponding to the requested final size of the prediction. is_thing_map (`Dict`): A dictionary mapping class indices to a boolean value indicating whether the class is a thing or not. threshold (`float`, *optional*, defaults to 0.85): The threshold used to binarize the segmentation masks. """ # we filter empty queries and detection below threshold scores, labels = score_labels_from_class_probabilities(out_logits) keep = (labels != out_logits.shape[-1] - 1) & (scores > threshold) cur_scores = scores[keep] cur_classes = labels[keep] cur_boxes = center_to_corners_format(boxes[keep]) if len(cur_boxes) != len(cur_classes): raise ValueError("Not as many boxes as there are classes") cur_masks = masks[keep] cur_masks = resize(cur_masks[:, None], processed_size, resample=PILImageResampling.BILINEAR) cur_masks = safe_squeeze(cur_masks, 1) b, h, w = cur_masks.shape # It may be that we have several predicted masks for the same stuff class. 
# In the following, we track the list of masks ids for each stuff class (they are merged later on) cur_masks = cur_masks.reshape(b, -1) stuff_equiv_classes = defaultdict(list) for k, label in enumerate(cur_classes): if not is_thing_map[label]: stuff_equiv_classes[label].append(k) seg_img = get_segmentation_image(cur_masks, processed_size, target_size, stuff_equiv_classes, deduplicate=True) area = get_mask_area(cur_masks, processed_size, n_classes=len(cur_scores)) # We filter out any mask that is too small if cur_classes.size() > 0: # We know filter empty masks as long as we find some filtered_small = np.array([a <= 4 for a in area], dtype=bool) while filtered_small.any(): cur_masks = cur_masks[~filtered_small] cur_scores = cur_scores[~filtered_small] cur_classes = cur_classes[~filtered_small] seg_img = get_segmentation_image(cur_masks, (h, w), target_size, stuff_equiv_classes, deduplicate=True) area = get_mask_area(seg_img, target_size, n_classes=len(cur_scores)) filtered_small = np.array([a <= 4 for a in area], dtype=bool) else: cur_classes = np.ones((1, 1), dtype=np.int64) segments_info = [ {"id": i, "isthing": is_thing_map[cat], "category_id": int(cat), "area": a} for i, (cat, a) in enumerate(zip(cur_classes, area)) ] del cur_classes with io.BytesIO() as out: PIL.Image.fromarray(seg_img).save(out, format="PNG") predictions = {"png_string": out.getvalue(), "segments_info": segments_info} return predictions # Copied from transformers.models.detr.image_processing_detr.resize_annotation def resize_annotation( annotation: Dict[str, Any], orig_size: Tuple[int, int], target_size: Tuple[int, int], threshold: float = 0.5, resample: PILImageResampling = PILImageResampling.NEAREST, ): """ Resizes an annotation to a target size. Args: annotation (`Dict[str, Any]`): The annotation dictionary. orig_size (`Tuple[int, int]`): The original size of the input image. target_size (`Tuple[int, int]`): The target size of the image, as returned by the preprocessing `resize` step. threshold (`float`, *optional*, defaults to 0.5): The threshold used to binarize the segmentation masks. resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`): The resampling filter to use when resizing the masks. """ ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size)) ratio_height, ratio_width = ratios new_annotation = {} new_annotation["size"] = target_size for key, value in annotation.items(): if key == "boxes": boxes = value scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32) new_annotation["boxes"] = scaled_boxes elif key == "area": area = value scaled_area = area * (ratio_width * ratio_height) new_annotation["area"] = scaled_area elif key == "masks": masks = value[:, None] masks = np.array([resize(mask, target_size, resample=resample) for mask in masks]) masks = masks.astype(np.float32) masks = masks[:, 0] > threshold new_annotation["masks"] = masks elif key == "size": new_annotation["size"] = target_size else: new_annotation[key] = value return new_annotation # Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle def binary_mask_to_rle(mask): """ Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format. Args: mask (`torch.Tensor` or `numpy.array`): A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target segment_id or class_id. Returns: `List`: Run-length encoded list of the binary mask. 
Refer to COCO API for more information about the RLE format. """ if is_torch_tensor(mask): mask = mask.numpy() pixels = mask.flatten() pixels = np.concatenate([[0], pixels, [0]]) runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 runs[1::2] -= runs[::2] return list(runs) # Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle def convert_segmentation_to_rle(segmentation): """ Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format. Args: segmentation (`torch.Tensor` or `numpy.array`): A segmentation map of shape `(height, width)` where each value denotes a segment or class id. Returns: `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id. """ segment_ids = torch.unique(segmentation) run_length_encodings = [] for idx in segment_ids: mask = torch.where(segmentation == idx, 1, 0) rle = binary_mask_to_rle(mask) run_length_encodings.append(rle) return run_length_encodings # Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): """ Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and `labels`. Args: masks (`torch.Tensor`): A tensor of shape `(num_queries, height, width)`. scores (`torch.Tensor`): A tensor of shape `(num_queries)`. labels (`torch.Tensor`): A tensor of shape `(num_queries)`. object_mask_threshold (`float`): A number between 0 and 1 used to binarize the masks. Raises: `ValueError`: Raised when the first dimension doesn't match in all input tensors. Returns: `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region < `object_mask_threshold`. 
""" if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): raise ValueError("mask, scores and labels must have the same shape!") to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) return masks[to_keep], scores[to_keep], labels[to_keep] # Copied from transformers.models.detr.image_processing_detr.check_segment_validity def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8): # Get the mask associated with the k class mask_k = mask_labels == k mask_k_area = mask_k.sum() # Compute the area of all the stuff in query k original_area = (mask_probs[k] >= mask_threshold).sum() mask_exists = mask_k_area > 0 and original_area > 0 # Eliminate disconnected tiny segments if mask_exists: area_ratio = mask_k_area / original_area if not area_ratio.item() > overlap_mask_area_threshold: mask_exists = False return mask_exists, mask_k # Copied from transformers.models.detr.image_processing_detr.compute_segments def compute_segments( mask_probs, pred_scores, pred_labels, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_size: Tuple[int, int] = None, ): height = mask_probs.shape[1] if target_size is None else target_size[0] width = mask_probs.shape[2] if target_size is None else target_size[1] segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) segments: List[Dict] = [] if target_size is not None: mask_probs = nn.functional.interpolate( mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False )[0] current_segment_id = 0 # Weigh each mask by its prediction score mask_probs *= pred_scores.view(-1, 1, 1) mask_labels = mask_probs.argmax(0) # [height, width] # Keep track of instances of each class stuff_memory_list: Dict[str, int] = {} for k in range(pred_labels.shape[0]): pred_class = pred_labels[k].item() should_fuse = pred_class in label_ids_to_fuse # Check if mask exists and large enough to be a segment mask_exists, mask_k = check_segment_validity( mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold ) if mask_exists: if pred_class in stuff_memory_list: current_segment_id = stuff_memory_list[pred_class] else: current_segment_id += 1 # Add current object segment to final segmentation map segmentation[mask_k] = current_segment_id segment_score = round(pred_scores[k].item(), 6) segments.append( { "id": current_segment_id, "label_id": pred_class, "was_fused": should_fuse, "score": segment_score, } ) if should_fuse: stuff_memory_list[pred_class] = current_segment_id return segmentation, segments class ConditionalDetrImageProcessor(BaseImageProcessor): r""" Constructs a Conditional Detr image processor. Args: format (`str`, *optional*, defaults to `"coco_detection"`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". do_resize (`bool`, *optional*, defaults to `True`): Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`): Size of the image's (height, width) dimensions after resizing. Can be overridden by the `size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use if resizing the image. 
do_rescale (`bool`, *optional*, defaults to `True`): Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize: Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): Mean values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method. do_pad (`bool`, *optional*, defaults to `True`): Controls whether to pad the image to the largest image in a batch and create a pixel mask. Can be overridden by the `do_pad` parameter in the `preprocess` method. """ model_input_names = ["pixel_values", "pixel_mask"] # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.__init__ def __init__( self, format: Union[str, AnnotionFormat] = AnnotionFormat.COCO_DETECTION, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Union[float, List[float]] = None, image_std: Union[float, List[float]] = None, do_pad: bool = True, **kwargs, ) -> None: if "pad_and_return_pixel_mask" in kwargs: do_pad = kwargs.pop("pad_and_return_pixel_mask") if "max_size" in kwargs: warnings.warn( "The `max_size` parameter is deprecated and will be removed in v4.26. " "Please specify in `size['longest_edge'] instead`.", FutureWarning, ) max_size = kwargs.pop("max_size") else: max_size = None if size is None else 1333 size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333} size = get_size_dict(size, max_size=max_size, default_to_square=False) super().__init__(**kwargs) self.format = format self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.do_pad = do_pad @property # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.max_size def max_size(self): warnings.warn( "The `max_size` parameter is deprecated and will be removed in v4.27. " "Please specify in `size['longest_edge'] instead`.", FutureWarning, ) return self.size["longest_edge"] @classmethod # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.from_dict with Detr->ConditionalDetr def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs): """ Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is created using from_dict and kwargs e.g. 
`ConditionalDetrImageProcessor.from_pretrained(checkpoint, size=600, max_size=800)` """ image_processor_dict = image_processor_dict.copy() if "max_size" in kwargs: image_processor_dict["max_size"] = kwargs.pop("max_size") if "pad_and_return_pixel_mask" in kwargs: image_processor_dict["pad_and_return_pixel_mask"] = kwargs.pop("pad_and_return_pixel_mask") return super().from_dict(image_processor_dict, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation with DETR->ConditionalDetr def prepare_annotation( self, image: np.ndarray, target: Dict, format: Optional[AnnotionFormat] = None, return_segmentation_masks: bool = None, masks_path: Optional[Union[str, pathlib.Path]] = None, ) -> Dict: """ Prepare an annotation for feeding into ConditionalDetr model. """ format = format if format is not None else self.format if format == AnnotionFormat.COCO_DETECTION: return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_detection_annotation(image, target, return_segmentation_masks) elif format == AnnotionFormat.COCO_PANOPTIC: return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_panoptic_annotation( image, target, masks_path=masks_path, return_masks=return_segmentation_masks ) else: raise ValueError(f"Format {format} is not supported.") return target # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare def prepare(self, image, target, return_segmentation_masks=False, masks_path=None): warnings.warn( "The `prepare` method is deprecated and will be removed in a future version. " "Please use `prepare_annotation` instead. Note: the `prepare_annotation` method " "does not return the image anymore.", ) target = self.prepare_annotation(image, target, return_segmentation_masks, masks_path, self.format) return image, target # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.convert_coco_poly_to_mask def convert_coco_poly_to_mask(self, *args, **kwargs): warnings.warn("The `convert_coco_poly_to_mask` method is deprecated and will be removed in a future version. ") return convert_coco_poly_to_mask(*args, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_detection with DETR->ConditionalDetr def prepare_coco_detection(self, *args, **kwargs): warnings.warn("The `prepare_coco_detection` method is deprecated and will be removed in a future version. ") return prepare_coco_detection_annotation(*args, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_panoptic def prepare_coco_panoptic(self, *args, **kwargs): warnings.warn("The `prepare_coco_panoptic` method is deprecated and will be removed in a future version. ") return prepare_coco_panoptic_annotation(*args, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension] = None, **kwargs, ) -> np.ndarray: """ Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an int, smaller edge of the image will be matched to this number. """ if "max_size" in kwargs: warnings.warn( "The `max_size` parameter is deprecated and will be removed in v4.26. 
" "Please specify in `size['longest_edge'] instead`.", FutureWarning, ) max_size = kwargs.pop("max_size") else: max_size = None size = get_size_dict(size, max_size=max_size, default_to_square=False) if "shortest_edge" in size and "longest_edge" in size: size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: size = (size["height"], size["width"]) else: raise ValueError( "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got" f" {size.keys()}." ) image = resize(image, size=size, resample=resample, data_format=data_format) return image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation def resize_annotation( self, annotation, orig_size, size, resample: PILImageResampling = PILImageResampling.NEAREST, ) -> Dict: """ Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched to this number. """ return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale def rescale( self, image: np.ndarray, rescale_factor: Union[float, int], data_format: Optional[ChannelDimension] = None ) -> np.ndarray: """ Rescale the image by the given factor. """ return rescale(image, rescale_factor, data_format=data_format) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize def normalize( self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Normalize the image with the given mean and standard deviation. """ return normalize(image, mean=mean, std=std, data_format=data_format) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict: """ Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to `[center_x, center_y, width, height]` format. """ return normalize_annotation(annotation, image_size=image_size) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad_and_create_pixel_mask def pad_and_create_pixel_mask( self, pixel_values_list: List[ImageInput], return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, ) -> BatchFeature: """ Pads a batch of images with zeros to the size of largest height and width in the batch and returns their corresponding pixel mask. Args: images (`List[np.ndarray]`): Batch of images to pad. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ warnings.warn( "This method is deprecated and will be removed in v4.27.0. 
Please use pad instead.", FutureWarning ) # pad expects a list of np.ndarray, but the previous feature extractors expected torch tensors images = [to_numpy_array(image) for image in pixel_values_list] return self.pad( images=images, return_pixel_mask=True, return_tensors=return_tensors, data_format=data_format, ) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image def _pad_image( self, image: np.ndarray, output_size: Tuple[int, int], constant_values: Union[float, Iterable[float]] = 0, data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Pad an image with zeros to the given size. """ input_height, input_width = get_image_size(image) output_height, output_width = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad( image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format ) return padded_image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad def pad( self, images: List[np.ndarray], constant_values: Union[float, Iterable[float]] = 0, return_pixel_mask: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width in the batch and optionally returns their corresponding pixel mask. Args: image (`np.ndarray`): Image to pad. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether to return a pixel mask. input_channel_dimension (`ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be inferred from the input image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ pad_size = get_max_height_width(images) padded_images = [ self._pad_image(image, pad_size, constant_values=constant_values, data_format=data_format) for image in images ] data = {"pixel_values": padded_images} if return_pixel_mask: masks = [make_pixel_mask(image=image, output_size=pad_size) for image in images] data["pixel_mask"] = masks return BatchFeature(data=data, tensor_type=return_tensors) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.preprocess def preprocess( self, images: ImageInput, annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None, return_segmentation_masks: bool = None, masks_path: Optional[Union[str, pathlib.Path]] = None, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample=None, # PILImageResampling do_rescale: Optional[bool] = None, rescale_factor: Optional[Union[int, float]] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: Optional[bool] = None, format: Optional[Union[str, AnnotionFormat]] = None, return_tensors: Optional[Union[TensorType, str]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs, ) -> BatchFeature: """ Preprocess an image or a batch of images so that it can be used by the model. Args: images (`ImageInput`): Image or batch of images to preprocess. 
annotations (`AnnotationType` or `List[AnnotationType]`, *optional*): List of annotations associated with the image or batch of images. If annotionation is for object detection, the annotations should be a dictionary with the following keys: - "image_id" (`int`): The image id. - "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a dictionary. An image can have no annotations, in which case the list should be empty. If annotionation is for segmentation, the annotations should be a dictionary with the following keys: - "image_id" (`int`): The image id. - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary. An image can have no segments, in which case the list should be empty. - "file_name" (`str`): The file name of the image. return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks): Whether to return segmentation masks. masks_path (`str` or `pathlib.Path`, *optional*): Path to the directory containing the segmentation masks. do_resize (`bool`, *optional*, defaults to self.do_resize): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to self.size): Size of the image after resizing. resample (`PILImageResampling`, *optional*, defaults to self.resample): Resampling filter to use when resizing the image. do_rescale (`bool`, *optional*, defaults to self.do_rescale): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to self.rescale_factor): Rescale factor to use when rescaling the image. do_normalize (`bool`, *optional*, defaults to self.do_normalize): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean): Mean to use when normalizing the image. image_std (`float` or `List[float]`, *optional*, defaults to self.image_std): Standard deviation to use when normalizing the image. do_pad (`bool`, *optional*, defaults to self.do_pad): Whether to pad the image. format (`str` or `AnnotionFormat`, *optional*, defaults to self.format): Format of the annotations. return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors): Type of tensors to return. If `None`, will return the list of images. data_format (`str` or `ChannelDimension`, *optional*, defaults to self.data_format): The channel dimension format of the image. If not provided, it will be the same as the input image. 
""" if "pad_and_return_pixel_mask" in kwargs: warnings.warn( "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, " "use `do_pad` instead.", FutureWarning, ) do_pad = kwargs.pop("pad_and_return_pixel_mask") max_size = None if "max_size" in kwargs: warnings.warn( "The `max_size` argument is deprecated and will be removed in a future version, use" " `size['longest_edge']` instead.", FutureWarning, ) size = kwargs.pop("max_size") do_resize = self.do_resize if do_resize is None else do_resize size = self.size if size is None else size size = get_size_dict(size=size, max_size=max_size, default_to_square=False) resample = self.resample if resample is None else resample do_rescale = self.do_rescale if do_rescale is None else do_rescale rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor do_normalize = self.do_normalize if do_normalize is None else do_normalize image_mean = self.image_mean if image_mean is None else image_mean image_std = self.image_std if image_std is None else image_std do_pad = self.do_pad if do_pad is None else do_pad format = self.format if format is None else format if do_resize is not None and size is None: raise ValueError("Size and max_size must be specified if do_resize is True.") if do_rescale is not None and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize is not None and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") images = make_list_of_images(images) if annotations is not None and isinstance(annotations, dict): annotations = [annotations] if annotations is not None and len(images) != len(annotations): raise ValueError( f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match." ) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) format = AnnotionFormat(format) if annotations is not None: if format == AnnotionFormat.COCO_DETECTION and not valid_coco_detection_annotations(annotations): raise ValueError( "Invalid COCO detection annotations. Annotations must a dict (single image) of list of dicts" "(batch of images) with the following keys: `image_id` and `annotations`, with the latter " "being a list of annotations in the COCO format." ) elif format == AnnotionFormat.COCO_PANOPTIC and not valid_coco_panoptic_annotations(annotations): raise ValueError( "Invalid COCO panoptic annotations. Annotations must a dict (single image) of list of dicts " "(batch of images) with the following keys: `image_id`, `file_name` and `segments_info`, with " "the latter being a list of annotations in the COCO format." ) elif format not in SUPPORTED_ANNOTATION_FORMATS: raise ValueError( f"Unsupported annotation format: {format} must be one of {SUPPORTED_ANNOTATION_FORMATS}" ) if ( masks_path is not None and format == AnnotionFormat.COCO_PANOPTIC and not isinstance(masks_path, (pathlib.Path, str)) ): raise ValueError( "The path to the directory containing the mask PNG files should be provided as a" f" `pathlib.Path` or string object, but is {type(masks_path)} instead." 
) # All transformations expect numpy arrays images = [to_numpy_array(image) for image in images] # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image) if annotations is not None: prepared_images = [] prepared_annotations = [] for image, target in zip(images, annotations): target = self.prepare_annotation( image, target, format, return_segmentation_masks=return_segmentation_masks, masks_path=masks_path ) prepared_images.append(image) prepared_annotations.append(target) images = prepared_images annotations = prepared_annotations del prepared_images, prepared_annotations # transformations if do_resize: if annotations is not None: resized_images, resized_annotations = [], [] for image, target in zip(images, annotations): orig_size = get_image_size(image) resized_image = self.resize(image, size=size, max_size=max_size, resample=resample) resized_annotation = self.resize_annotation(target, orig_size, get_image_size(resized_image)) resized_images.append(resized_image) resized_annotations.append(resized_annotation) images = resized_images annotations = resized_annotations del resized_images, resized_annotations else: images = [self.resize(image, size=size, resample=resample) for image in images] if do_rescale: images = [self.rescale(image, rescale_factor) for image in images] if do_normalize: images = [self.normalize(image, image_mean, image_std) for image in images] if annotations is not None: annotations = [ self.normalize_annotation(annotation, get_image_size(image)) for annotation, image in zip(annotations, images) ] if do_pad: # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...} data = self.pad(images, return_pixel_mask=True, data_format=data_format) else: images = [to_channel_dimension_format(image, data_format) for image in images] data = {"pixel_values": images} encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) if annotations is not None: encoded_inputs["labels"] = [ BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations ] return encoded_inputs # POSTPROCESSING METHODS - TODO: add support for other frameworks def post_process(self, outputs, target_sizes): """ Converts the output of [`ConditionalDetrForObjectDetection`] into the format expected by the COCO api. Only supports PyTorch. Args: outputs ([`ConditionalDetrObjectDetectionOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original image size (before any data augmentation). For visualization, this should be the image size after data augment, but before padding. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. 
""" warnings.warn( "`post_process` is deprecated and will be removed in v5 of Transformers, please use" " `post_process_object_detection`", FutureWarning, ) out_logits, out_bbox = outputs.logits, outputs.pred_boxes if len(out_logits) != len(target_sizes): raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits") if target_sizes.shape[1] != 2: raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") prob = out_logits.sigmoid() topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 300, dim=1) scores = topk_values topk_boxes = topk_indexes // out_logits.shape[2] labels = topk_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) # and from relative [0, 1] to absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)] return results # Copied from transformers.models.deformable_detr.image_processing_deformable_detr.DeformableDetrImageProcessor.post_process_object_detection with DeformableDetr->ConditionalDetr def post_process_object_detection( self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None ): """ Converts the raw output of [`ConditionalDetrForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch. Args: outputs ([`DetrObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. 
""" out_logits, out_bbox = outputs.logits, outputs.pred_boxes if target_sizes is not None: if len(out_logits) != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) prob = out_logits.sigmoid() topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1) scores = topk_values topk_boxes = topk_indexes // out_logits.shape[2] labels = topk_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) # and from relative [0, 1] to absolute [0, height] coordinates if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] for s, l, b in zip(scores, labels, boxes): score = s[s > threshold] label = l[s > threshold] box = b[s > threshold] results.append({"scores": score, "labels": label, "boxes": box}) return results # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_semantic_segmentation with Detr->ConditionalDetr def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple[int, int]] = None): """ Converts the output of [`ConditionalDetrForSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): Raw outputs of the model. target_sizes (`List[Tuple[int, int]]`, *optional*): A list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If unset, predictions will not be resized. Returns: `List[torch.Tensor]`: A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id. 
""" class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] # Remove the null class `[..., :-1]` masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Semantic segmentation logits of shape (batch_size, num_classes, height, width) segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) batch_size = class_queries_logits.shape[0] # Resize logits and compute semantic segmentation maps if target_sizes is not None: if batch_size != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) semantic_segmentation = [] for idx in range(batch_size): resized_logits = nn.functional.interpolate( segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False ) semantic_map = resized_logits[0].argmax(dim=0) semantic_segmentation.append(semantic_map) else: semantic_segmentation = segmentation.argmax(dim=1) semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_instance_segmentation with Detr->ConditionalDetr def post_process_instance_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, target_sizes: Optional[List[Tuple[int, int]]] = None, return_coco_annotation: Optional[bool] = False, ) -> List[Dict]: """ Converts the output of [`ConditionalDetrForSegmentation`] into instance segmentation predictions. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If unset, predictions will not be resized. return_coco_annotation (`bool`, *optional*): Defaults to `False`. If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id` or `List[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to `True`. Set to `None` if no mask if found above `threshold`. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- An integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **score** -- Prediction score of segment with `segment_id`. 
""" class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) # No mask found if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=[], target_size=target_size, ) # Return segmentation map in run-length encoding (RLE) format if return_coco_annotation: segmentation = convert_segmentation_to_rle(segmentation) results.append({"segmentation": segmentation, "segments_info": segments}) return results # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_panoptic_segmentation with Detr->ConditionalDetr def post_process_panoptic_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_sizes: Optional[List[Tuple[int, int]]] = None, ) -> List[Dict]: """ Converts the output of [`ConditionalDetrForSegmentation`] into image panoptic segmentation predictions. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): The outputs from [`ConditionalDetrForSegmentation`]. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. label_ids_to_fuse (`Set[int]`, *optional*): The labels in this state will have all their instances be fused together. For instance we could say there can only be one sky in an image, but several persons, so the label ID for sky would be in that set, but not the one for person. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction in batch. If unset, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id` or `None` if no mask if found above `threshold`. 
If `target_sizes` is specified, segmentation is resized to the corresponding `target_sizes` entry. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- an integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. Multiple instances of the same class / label were fused and assigned a single `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ if label_ids_to_fuse is None: warnings.warn("`label_ids_to_fuse` unset. No instance will be fused.") label_ids_to_fuse = set() class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) # No mask found if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=label_ids_to_fuse, target_size=target_size, ) results.append({"segmentation": segmentation, "segments_info": segments}) return results
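
# A minimal illustrative sketch (hypothetical values) of the integer-division change that separates the two copies of
# `image_processing_conditional_detr.py` in this record: the later copy derives the top-k box indices with
# `torch_int_div` from `transformers.pytorch_utils` instead of the tensor `//` operator.
import torch
from transformers.pytorch_utils import torch_int_div

topk_indexes = torch.tensor([7, 91, 300])  # hypothetical flattened top-k indices
num_classes = 92  # hypothetical size of the last logits dimension

old_boxes = topk_indexes // num_classes  # may emit a floor_divide/__floordiv__ deprecation warning on some torch versions
new_boxes = torch_int_div(topk_indexes, num_classes)  # explicit floor division, no deprecation warning
assert torch.equal(old_boxes, new_boxes)  # identical results for these non-negative indices
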
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Conditional DETR.""" import io import pathlib import warnings from collections import defaultdict from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union import numpy as np from transformers.feature_extraction_utils import BatchFeature from transformers.image_processing_utils import BaseImageProcessor, get_size_dict from transformers.image_transforms import ( PaddingMode, center_to_corners_format, corners_to_center_format, id_to_rgb, normalize, pad, rescale, resize, rgb_to_id, to_channel_dimension_format, ) from transformers.image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_coco_detection_annotations, valid_coco_panoptic_annotations, valid_images, ) from transformers.utils import ( is_flax_available, is_jax_tensor, is_scipy_available, is_tf_available, is_tf_tensor, is_torch_available, is_torch_tensor, is_vision_available, ) from transformers.utils.generic import ExplicitEnum, TensorType if is_torch_available(): import torch from torch import nn from transformers.pytorch_utils import torch_int_div if is_vision_available(): import PIL if is_scipy_available(): import scipy.special import scipy.stats AnnotationType = Dict[str, Union[int, str, List[Dict]]] class AnnotionFormat(ExplicitEnum): COCO_DETECTION = "coco_detection" COCO_PANOPTIC = "coco_panoptic" SUPPORTED_ANNOTATION_FORMATS = (AnnotionFormat.COCO_DETECTION, AnnotionFormat.COCO_PANOPTIC) # Copied from transformers.models.detr.image_processing_detr.get_size_with_aspect_ratio def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]: """ Computes the output image size given the input image size and the desired output size. Args: image_size (`Tuple[int, int]`): The input image size. size (`int`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. """ height, width = image_size if max_size is not None: min_original_size = float(min((height, width))) max_original_size = float(max((height, width))) if max_original_size / min_original_size * size > max_size: size = int(round(max_size * min_original_size / max_original_size)) if (height <= width and height == size) or (width <= height and width == size): return height, width if width < height: ow = size oh = int(size * height / width) else: oh = size ow = int(size * width / height) return (oh, ow) # Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size def get_resize_output_image_size( input_image: np.ndarray, size: Union[int, Tuple[int, int], List[int]], max_size: Optional[int] = None ) -> Tuple[int, int]: """ Computes the output image size given the input image size and the desired output size. 
If the desired output size is a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output image size is computed by keeping the aspect ratio of the input image size. Args: image_size (`Tuple[int, int]`): The input image size. size (`int`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. """ image_size = get_image_size(input_image) if isinstance(size, (list, tuple)): return size return get_size_with_aspect_ratio(image_size, size, max_size) # Copied from transformers.models.detr.image_processing_detr.get_numpy_to_framework_fn def get_numpy_to_framework_fn(arr) -> Callable: """ Returns a function that converts a numpy array to the framework of the input array. Args: arr (`np.ndarray`): The array to convert. """ if isinstance(arr, np.ndarray): return np.array if is_tf_available() and is_tf_tensor(arr): import tensorflow as tf return tf.convert_to_tensor if is_torch_available() and is_torch_tensor(arr): import torch return torch.tensor if is_flax_available() and is_jax_tensor(arr): import jax.numpy as jnp return jnp.array raise ValueError(f"Cannot convert arrays of type {type(arr)}") # Copied from transformers.models.detr.image_processing_detr.safe_squeeze def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray: """ Squeezes an array, but only if the axis specified has dim 1. """ if axis is None: return arr.squeeze() try: return arr.squeeze(axis=axis) except ValueError: return arr # Copied from transformers.models.detr.image_processing_detr.normalize_annotation def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict: image_height, image_width = image_size norm_annotation = {} for key, value in annotation.items(): if key == "boxes": boxes = value boxes = corners_to_center_format(boxes) boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32) norm_annotation[key] = boxes else: norm_annotation[key] = value return norm_annotation # Copied from transformers.models.detr.image_processing_detr.max_across_indices def max_across_indices(values: Iterable[Any]) -> List[Any]: """ Return the maximum value across all indices of an iterable of values. """ return [max(values_i) for values_i in zip(*values)] # Copied from transformers.models.detr.image_processing_detr.get_max_height_width def get_max_height_width(images: List[np.ndarray]) -> List[int]: """ Get the maximum height and width across all images in a batch. """ input_channel_dimension = infer_channel_dimension_format(images[0]) if input_channel_dimension == ChannelDimension.FIRST: _, max_height, max_width = max_across_indices([img.shape for img in images]) elif input_channel_dimension == ChannelDimension.LAST: max_height, max_width, _ = max_across_indices([img.shape for img in images]) else: raise ValueError(f"Invalid channel dimension format: {input_channel_dimension}") return (max_height, max_width) # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int]) -> np.ndarray: """ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. Args: image (`np.ndarray`): Image to make the pixel mask for. output_size (`Tuple[int, int]`): Output size of the mask. 
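
    Example (a minimal illustrative sketch, not from the original docstring; the dummy channels-first image is made
    up):

    ```python
    >>> import numpy as np

    >>> image = np.zeros((3, 4, 6))  # (num_channels, height, width)
    >>> mask = make_pixel_mask(image, output_size=(5, 8))
    >>> mask.shape  # ones over the original 4x6 area, zeros over the padded region
    (5, 8)
    ```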
""" input_height, input_width = get_image_size(image) mask = np.zeros(output_size, dtype=np.int64) mask[:input_height, :input_width] = 1 return mask # Copied from transformers.models.detr.image_processing_detr.convert_coco_poly_to_mask def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray: """ Convert a COCO polygon annotation to a mask. Args: segmentations (`List[List[float]]`): List of polygons, each polygon represented by a list of x-y coordinates. height (`int`): Height of the mask. width (`int`): Width of the mask. """ try: from pycocotools import mask as coco_mask except ImportError: raise ImportError("Pycocotools is not installed in your environment.") masks = [] for polygons in segmentations: rles = coco_mask.frPyObjects(polygons, height, width) mask = coco_mask.decode(rles) if len(mask.shape) < 3: mask = mask[..., None] mask = np.asarray(mask, dtype=np.uint8) mask = np.any(mask, axis=2) masks.append(mask) if masks: masks = np.stack(masks, axis=0) else: masks = np.zeros((0, height, width), dtype=np.uint8) return masks # Copied from transformers.models.detr.image_processing_detr.prepare_coco_detection_annotation with DETR->ConditionalDetr def prepare_coco_detection_annotation(image, target, return_segmentation_masks: bool = False): """ Convert the target in COCO format into the format expected by ConditionalDetr. """ image_height, image_width = get_image_size(image) image_id = target["image_id"] image_id = np.asarray([image_id], dtype=np.int64) # Get all COCO annotations for the given image. annotations = target["annotations"] annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0] classes = [obj["category_id"] for obj in annotations] classes = np.asarray(classes, dtype=np.int64) # for conversion to coco api area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32) iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64) boxes = [obj["bbox"] for obj in annotations] # guard against no boxes via resizing boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4) boxes[:, 2:] += boxes[:, :2] boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width) boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height) keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) new_target = {} new_target["image_id"] = image_id new_target["class_labels"] = classes[keep] new_target["boxes"] = boxes[keep] new_target["area"] = area[keep] new_target["iscrowd"] = iscrowd[keep] new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64) if annotations and "keypoints" in annotations[0]: keypoints = [obj["keypoints"] for obj in annotations] keypoints = np.asarray(keypoints, dtype=np.float32) num_keypoints = keypoints.shape[0] keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints new_target["keypoints"] = keypoints[keep] if return_segmentation_masks: segmentation_masks = [obj["segmentation"] for obj in annotations] masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width) new_target["masks"] = masks[keep] return new_target # Copied from transformers.models.detr.image_processing_detr.masks_to_boxes def masks_to_boxes(masks: np.ndarray) -> np.ndarray: """ Compute the bounding boxes around the provided panoptic segmentation masks. 
Args: masks: masks in format `[number_masks, height, width]` where N is the number of masks Returns: boxes: bounding boxes in format `[number_masks, 4]` in xyxy format """ if masks.size == 0: return np.zeros((0, 4)) h, w = masks.shape[-2:] y = np.arange(0, h, dtype=np.float32) x = np.arange(0, w, dtype=np.float32) # see https://github.com/pytorch/pytorch/issues/50276 y, x = np.meshgrid(y, x, indexing="ij") x_mask = masks * np.expand_dims(x, axis=0) x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) x_min = x.filled(fill_value=1e8) x_min = x_min.reshape(x_min.shape[0], -1).min(-1) y_mask = masks * np.expand_dims(y, axis=0) y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) y_min = y.filled(fill_value=1e8) y_min = y_min.reshape(y_min.shape[0], -1).min(-1) return np.stack([x_min, y_min, x_max, y_max], 1) # Copied from transformers.models.detr.image_processing_detr.prepare_coco_panoptic_annotation with DETR->ConditionalDetr def prepare_coco_panoptic_annotation( image: np.ndarray, target: Dict, masks_path: Union[str, pathlib.Path], return_masks: bool = True ) -> Dict: """ Prepare a coco panoptic annotation for ConditionalDetr. """ image_height, image_width = get_image_size(image) annotation_path = pathlib.Path(masks_path) / target["file_name"] new_target = {} new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64) new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64) new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64) if "segments_info" in target: masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32) masks = rgb_to_id(masks) ids = np.array([segment_info["id"] for segment_info in target["segments_info"]]) masks = masks == ids[:, None, None] masks = masks.astype(np.uint8) if return_masks: new_target["masks"] = masks new_target["boxes"] = masks_to_boxes(masks) new_target["class_labels"] = np.array( [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64 ) new_target["iscrowd"] = np.asarray( [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64 ) new_target["area"] = np.asarray( [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32 ) return new_target # Copied from transformers.models.detr.image_processing_detr.get_segmentation_image def get_segmentation_image( masks: np.ndarray, input_size: Tuple, target_size: Tuple, stuff_equiv_classes, deduplicate=False ): h, w = input_size final_h, final_w = target_size m_id = scipy.special.softmax(masks.transpose(0, 1), -1) if m_id.shape[-1] == 0: # We didn't detect any mask :( m_id = np.zeros((h, w), dtype=np.int64) else: m_id = m_id.argmax(-1).reshape(h, w) if deduplicate: # Merge the masks corresponding to the same stuff class for equiv in stuff_equiv_classes.values(): for eq_id in equiv: m_id[m_id == eq_id] = equiv[0] seg_img = id_to_rgb(m_id) seg_img = resize(seg_img, (final_w, final_h), resample=PILImageResampling.NEAREST) return seg_img # Copied from transformers.models.detr.image_processing_detr.get_mask_area def get_mask_area(seg_img: np.ndarray, target_size: Tuple[int, int], n_classes: int) -> np.ndarray: final_h, final_w = target_size np_seg_img = seg_img.astype(np.uint8) np_seg_img = np_seg_img.reshape(final_h, final_w, 3) m_id = rgb_to_id(np_seg_img) area = [(m_id == i).sum() for i in range(n_classes)] 
    return area


# Copied from transformers.models.detr.image_processing_detr.score_labels_from_class_probabilities
def score_labels_from_class_probabilities(logits: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
    probs = scipy.special.softmax(logits, axis=-1)
    labels = probs.argmax(-1, keepdims=True)
    scores = np.take_along_axis(probs, labels, axis=-1)
    scores, labels = scores.squeeze(-1), labels.squeeze(-1)
    return scores, labels


# Copied from transformers.models.detr.image_processing_detr.post_process_panoptic_sample with DetrForSegmentation->ConditionalDetrForSegmentation
def post_process_panoptic_sample(
    out_logits: np.ndarray,
    masks: np.ndarray,
    boxes: np.ndarray,
    processed_size: Tuple[int, int],
    target_size: Tuple[int, int],
    is_thing_map: Dict,
    threshold=0.85,
) -> Dict:
    """
    Converts the output of [`ConditionalDetrForSegmentation`] into panoptic segmentation predictions for a single
    sample.

    Args:
        out_logits (`torch.Tensor`):
            The logits for this sample.
        masks (`torch.Tensor`):
            The predicted segmentation masks for this sample.
        boxes (`torch.Tensor`):
            The predicted bounding boxes for this sample. The boxes are in the normalized format `(center_x,
            center_y, width, height)` and values between `[0, 1]`, relative to the size of the image (disregarding
            padding).
        processed_size (`Tuple[int, int]`):
            The processed size of the image `(height, width)`, as returned by the preprocessing step i.e. the size
            after data augmentation but before batching.
        target_size (`Tuple[int, int]`):
            The target size of the image, `(height, width)` corresponding to the requested final size of the
            prediction.
        is_thing_map (`Dict`):
            A dictionary mapping class indices to a boolean value indicating whether the class is a thing or not.
        threshold (`float`, *optional*, defaults to 0.85):
            The threshold used to binarize the segmentation masks.
    """
    # we filter empty queries and detection below threshold
    scores, labels = score_labels_from_class_probabilities(out_logits)
    keep = (labels != out_logits.shape[-1] - 1) & (scores > threshold)

    cur_scores = scores[keep]
    cur_classes = labels[keep]
    cur_boxes = center_to_corners_format(boxes[keep])

    if len(cur_boxes) != len(cur_classes):
        raise ValueError("Not as many boxes as there are classes")

    cur_masks = masks[keep]
    cur_masks = resize(cur_masks[:, None], processed_size, resample=PILImageResampling.BILINEAR)
    cur_masks = safe_squeeze(cur_masks, 1)
    b, h, w = cur_masks.shape

    # It may be that we have several predicted masks for the same stuff class.
# In the following, we track the list of masks ids for each stuff class (they are merged later on) cur_masks = cur_masks.reshape(b, -1) stuff_equiv_classes = defaultdict(list) for k, label in enumerate(cur_classes): if not is_thing_map[label]: stuff_equiv_classes[label].append(k) seg_img = get_segmentation_image(cur_masks, processed_size, target_size, stuff_equiv_classes, deduplicate=True) area = get_mask_area(cur_masks, processed_size, n_classes=len(cur_scores)) # We filter out any mask that is too small if cur_classes.size() > 0: # We know filter empty masks as long as we find some filtered_small = np.array([a <= 4 for a in area], dtype=bool) while filtered_small.any(): cur_masks = cur_masks[~filtered_small] cur_scores = cur_scores[~filtered_small] cur_classes = cur_classes[~filtered_small] seg_img = get_segmentation_image(cur_masks, (h, w), target_size, stuff_equiv_classes, deduplicate=True) area = get_mask_area(seg_img, target_size, n_classes=len(cur_scores)) filtered_small = np.array([a <= 4 for a in area], dtype=bool) else: cur_classes = np.ones((1, 1), dtype=np.int64) segments_info = [ {"id": i, "isthing": is_thing_map[cat], "category_id": int(cat), "area": a} for i, (cat, a) in enumerate(zip(cur_classes, area)) ] del cur_classes with io.BytesIO() as out: PIL.Image.fromarray(seg_img).save(out, format="PNG") predictions = {"png_string": out.getvalue(), "segments_info": segments_info} return predictions # Copied from transformers.models.detr.image_processing_detr.resize_annotation def resize_annotation( annotation: Dict[str, Any], orig_size: Tuple[int, int], target_size: Tuple[int, int], threshold: float = 0.5, resample: PILImageResampling = PILImageResampling.NEAREST, ): """ Resizes an annotation to a target size. Args: annotation (`Dict[str, Any]`): The annotation dictionary. orig_size (`Tuple[int, int]`): The original size of the input image. target_size (`Tuple[int, int]`): The target size of the image, as returned by the preprocessing `resize` step. threshold (`float`, *optional*, defaults to 0.5): The threshold used to binarize the segmentation masks. resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`): The resampling filter to use when resizing the masks. """ ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size)) ratio_height, ratio_width = ratios new_annotation = {} new_annotation["size"] = target_size for key, value in annotation.items(): if key == "boxes": boxes = value scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32) new_annotation["boxes"] = scaled_boxes elif key == "area": area = value scaled_area = area * (ratio_width * ratio_height) new_annotation["area"] = scaled_area elif key == "masks": masks = value[:, None] masks = np.array([resize(mask, target_size, resample=resample) for mask in masks]) masks = masks.astype(np.float32) masks = masks[:, 0] > threshold new_annotation["masks"] = masks elif key == "size": new_annotation["size"] = target_size else: new_annotation[key] = value return new_annotation # Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle def binary_mask_to_rle(mask): """ Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format. Args: mask (`torch.Tensor` or `numpy.array`): A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target segment_id or class_id. Returns: `List`: Run-length encoded list of the binary mask. 
Refer to COCO API for more information about the RLE format. """ if is_torch_tensor(mask): mask = mask.numpy() pixels = mask.flatten() pixels = np.concatenate([[0], pixels, [0]]) runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 runs[1::2] -= runs[::2] return list(runs) # Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle def convert_segmentation_to_rle(segmentation): """ Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format. Args: segmentation (`torch.Tensor` or `numpy.array`): A segmentation map of shape `(height, width)` where each value denotes a segment or class id. Returns: `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id. """ segment_ids = torch.unique(segmentation) run_length_encodings = [] for idx in segment_ids: mask = torch.where(segmentation == idx, 1, 0) rle = binary_mask_to_rle(mask) run_length_encodings.append(rle) return run_length_encodings # Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): """ Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and `labels`. Args: masks (`torch.Tensor`): A tensor of shape `(num_queries, height, width)`. scores (`torch.Tensor`): A tensor of shape `(num_queries)`. labels (`torch.Tensor`): A tensor of shape `(num_queries)`. object_mask_threshold (`float`): A number between 0 and 1 used to binarize the masks. Raises: `ValueError`: Raised when the first dimension doesn't match in all input tensors. Returns: `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region < `object_mask_threshold`. 
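
    Example (a minimal illustrative sketch, not from the original docstring; the random predictions, threshold and
    label count are made up):

    ```python
    >>> import torch

    >>> masks, scores = torch.rand(10, 32, 32), torch.rand(10)
    >>> labels = torch.randint(0, 5, (10,))
    >>> masks, scores, labels = remove_low_and_no_objects(masks, scores, labels, object_mask_threshold=0.5, num_labels=5)
    ```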
""" if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): raise ValueError("mask, scores and labels must have the same shape!") to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) return masks[to_keep], scores[to_keep], labels[to_keep] # Copied from transformers.models.detr.image_processing_detr.check_segment_validity def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8): # Get the mask associated with the k class mask_k = mask_labels == k mask_k_area = mask_k.sum() # Compute the area of all the stuff in query k original_area = (mask_probs[k] >= mask_threshold).sum() mask_exists = mask_k_area > 0 and original_area > 0 # Eliminate disconnected tiny segments if mask_exists: area_ratio = mask_k_area / original_area if not area_ratio.item() > overlap_mask_area_threshold: mask_exists = False return mask_exists, mask_k # Copied from transformers.models.detr.image_processing_detr.compute_segments def compute_segments( mask_probs, pred_scores, pred_labels, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_size: Tuple[int, int] = None, ): height = mask_probs.shape[1] if target_size is None else target_size[0] width = mask_probs.shape[2] if target_size is None else target_size[1] segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) segments: List[Dict] = [] if target_size is not None: mask_probs = nn.functional.interpolate( mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False )[0] current_segment_id = 0 # Weigh each mask by its prediction score mask_probs *= pred_scores.view(-1, 1, 1) mask_labels = mask_probs.argmax(0) # [height, width] # Keep track of instances of each class stuff_memory_list: Dict[str, int] = {} for k in range(pred_labels.shape[0]): pred_class = pred_labels[k].item() should_fuse = pred_class in label_ids_to_fuse # Check if mask exists and large enough to be a segment mask_exists, mask_k = check_segment_validity( mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold ) if mask_exists: if pred_class in stuff_memory_list: current_segment_id = stuff_memory_list[pred_class] else: current_segment_id += 1 # Add current object segment to final segmentation map segmentation[mask_k] = current_segment_id segment_score = round(pred_scores[k].item(), 6) segments.append( { "id": current_segment_id, "label_id": pred_class, "was_fused": should_fuse, "score": segment_score, } ) if should_fuse: stuff_memory_list[pred_class] = current_segment_id return segmentation, segments class ConditionalDetrImageProcessor(BaseImageProcessor): r""" Constructs a Conditional Detr image processor. Args: format (`str`, *optional*, defaults to `"coco_detection"`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". do_resize (`bool`, *optional*, defaults to `True`): Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`): Size of the image's (height, width) dimensions after resizing. Can be overridden by the `size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use if resizing the image. 
do_rescale (`bool`, *optional*, defaults to `True`): Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize: Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): Mean values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method. do_pad (`bool`, *optional*, defaults to `True`): Controls whether to pad the image to the largest image in a batch and create a pixel mask. Can be overridden by the `do_pad` parameter in the `preprocess` method. """ model_input_names = ["pixel_values", "pixel_mask"] # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.__init__ def __init__( self, format: Union[str, AnnotionFormat] = AnnotionFormat.COCO_DETECTION, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Union[float, List[float]] = None, image_std: Union[float, List[float]] = None, do_pad: bool = True, **kwargs, ) -> None: if "pad_and_return_pixel_mask" in kwargs: do_pad = kwargs.pop("pad_and_return_pixel_mask") if "max_size" in kwargs: warnings.warn( "The `max_size` parameter is deprecated and will be removed in v4.26. " "Please specify in `size['longest_edge'] instead`.", FutureWarning, ) max_size = kwargs.pop("max_size") else: max_size = None if size is None else 1333 size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333} size = get_size_dict(size, max_size=max_size, default_to_square=False) super().__init__(**kwargs) self.format = format self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.do_pad = do_pad @property # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.max_size def max_size(self): warnings.warn( "The `max_size` parameter is deprecated and will be removed in v4.27. " "Please specify in `size['longest_edge'] instead`.", FutureWarning, ) return self.size["longest_edge"] @classmethod # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.from_dict with Detr->ConditionalDetr def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs): """ Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is created using from_dict and kwargs e.g. 
`ConditionalDetrImageProcessor.from_pretrained(checkpoint, size=600, max_size=800)` """ image_processor_dict = image_processor_dict.copy() if "max_size" in kwargs: image_processor_dict["max_size"] = kwargs.pop("max_size") if "pad_and_return_pixel_mask" in kwargs: image_processor_dict["pad_and_return_pixel_mask"] = kwargs.pop("pad_and_return_pixel_mask") return super().from_dict(image_processor_dict, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation with DETR->ConditionalDetr def prepare_annotation( self, image: np.ndarray, target: Dict, format: Optional[AnnotionFormat] = None, return_segmentation_masks: bool = None, masks_path: Optional[Union[str, pathlib.Path]] = None, ) -> Dict: """ Prepare an annotation for feeding into ConditionalDetr model. """ format = format if format is not None else self.format if format == AnnotionFormat.COCO_DETECTION: return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_detection_annotation(image, target, return_segmentation_masks) elif format == AnnotionFormat.COCO_PANOPTIC: return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_panoptic_annotation( image, target, masks_path=masks_path, return_masks=return_segmentation_masks ) else: raise ValueError(f"Format {format} is not supported.") return target # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare def prepare(self, image, target, return_segmentation_masks=False, masks_path=None): warnings.warn( "The `prepare` method is deprecated and will be removed in a future version. " "Please use `prepare_annotation` instead. Note: the `prepare_annotation` method " "does not return the image anymore.", ) target = self.prepare_annotation(image, target, return_segmentation_masks, masks_path, self.format) return image, target # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.convert_coco_poly_to_mask def convert_coco_poly_to_mask(self, *args, **kwargs): warnings.warn("The `convert_coco_poly_to_mask` method is deprecated and will be removed in a future version. ") return convert_coco_poly_to_mask(*args, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_detection with DETR->ConditionalDetr def prepare_coco_detection(self, *args, **kwargs): warnings.warn("The `prepare_coco_detection` method is deprecated and will be removed in a future version. ") return prepare_coco_detection_annotation(*args, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_panoptic def prepare_coco_panoptic(self, *args, **kwargs): warnings.warn("The `prepare_coco_panoptic` method is deprecated and will be removed in a future version. ") return prepare_coco_panoptic_annotation(*args, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension] = None, **kwargs, ) -> np.ndarray: """ Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an int, smaller edge of the image will be matched to this number. """ if "max_size" in kwargs: warnings.warn( "The `max_size` parameter is deprecated and will be removed in v4.26. 
" "Please specify in `size['longest_edge'] instead`.", FutureWarning, ) max_size = kwargs.pop("max_size") else: max_size = None size = get_size_dict(size, max_size=max_size, default_to_square=False) if "shortest_edge" in size and "longest_edge" in size: size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: size = (size["height"], size["width"]) else: raise ValueError( "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got" f" {size.keys()}." ) image = resize(image, size=size, resample=resample, data_format=data_format) return image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation def resize_annotation( self, annotation, orig_size, size, resample: PILImageResampling = PILImageResampling.NEAREST, ) -> Dict: """ Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched to this number. """ return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale def rescale( self, image: np.ndarray, rescale_factor: Union[float, int], data_format: Optional[ChannelDimension] = None ) -> np.ndarray: """ Rescale the image by the given factor. """ return rescale(image, rescale_factor, data_format=data_format) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize def normalize( self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Normalize the image with the given mean and standard deviation. """ return normalize(image, mean=mean, std=std, data_format=data_format) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict: """ Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to `[center_x, center_y, width, height]` format. """ return normalize_annotation(annotation, image_size=image_size) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad_and_create_pixel_mask def pad_and_create_pixel_mask( self, pixel_values_list: List[ImageInput], return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, ) -> BatchFeature: """ Pads a batch of images with zeros to the size of largest height and width in the batch and returns their corresponding pixel mask. Args: images (`List[np.ndarray]`): Batch of images to pad. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ warnings.warn( "This method is deprecated and will be removed in v4.27.0. 
Please use pad instead.", FutureWarning ) # pad expects a list of np.ndarray, but the previous feature extractors expected torch tensors images = [to_numpy_array(image) for image in pixel_values_list] return self.pad( images=images, return_pixel_mask=True, return_tensors=return_tensors, data_format=data_format, ) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image def _pad_image( self, image: np.ndarray, output_size: Tuple[int, int], constant_values: Union[float, Iterable[float]] = 0, data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Pad an image with zeros to the given size. """ input_height, input_width = get_image_size(image) output_height, output_width = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad( image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format ) return padded_image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad def pad( self, images: List[np.ndarray], constant_values: Union[float, Iterable[float]] = 0, return_pixel_mask: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width in the batch and optionally returns their corresponding pixel mask. Args: image (`np.ndarray`): Image to pad. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether to return a pixel mask. input_channel_dimension (`ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be inferred from the input image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ pad_size = get_max_height_width(images) padded_images = [ self._pad_image(image, pad_size, constant_values=constant_values, data_format=data_format) for image in images ] data = {"pixel_values": padded_images} if return_pixel_mask: masks = [make_pixel_mask(image=image, output_size=pad_size) for image in images] data["pixel_mask"] = masks return BatchFeature(data=data, tensor_type=return_tensors) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.preprocess def preprocess( self, images: ImageInput, annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None, return_segmentation_masks: bool = None, masks_path: Optional[Union[str, pathlib.Path]] = None, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample=None, # PILImageResampling do_rescale: Optional[bool] = None, rescale_factor: Optional[Union[int, float]] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: Optional[bool] = None, format: Optional[Union[str, AnnotionFormat]] = None, return_tensors: Optional[Union[TensorType, str]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs, ) -> BatchFeature: """ Preprocess an image or a batch of images so that it can be used by the model. Args: images (`ImageInput`): Image or batch of images to preprocess. 
            annotations (`AnnotationType` or `List[AnnotationType]`, *optional*):
                List of annotations associated with the image or batch of images. If the annotations are for object
                detection, the annotations should be a dictionary with the following keys:
                - "image_id" (`int`): The image id.
                - "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a
                  dictionary. An image can have no annotations, in which case the list should be empty.
                If the annotations are for segmentation, the annotations should be a dictionary with the following
                keys:
                - "image_id" (`int`): The image id.
                - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary.
                  An image can have no segments, in which case the list should be empty.
                - "file_name" (`str`): The file name of the image.
            return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks):
                Whether to return segmentation masks.
            masks_path (`str` or `pathlib.Path`, *optional*):
                Path to the directory containing the segmentation masks.
            do_resize (`bool`, *optional*, defaults to self.do_resize):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to self.size):
                Size of the image after resizing.
            resample (`PILImageResampling`, *optional*, defaults to self.resample):
                Resampling filter to use when resizing the image.
            do_rescale (`bool`, *optional*, defaults to self.do_rescale):
                Whether to rescale the image.
            rescale_factor (`float`, *optional*, defaults to self.rescale_factor):
                Rescale factor to use when rescaling the image.
            do_normalize (`bool`, *optional*, defaults to self.do_normalize):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean):
                Mean to use when normalizing the image.
            image_std (`float` or `List[float]`, *optional*, defaults to self.image_std):
                Standard deviation to use when normalizing the image.
            do_pad (`bool`, *optional*, defaults to self.do_pad):
                Whether to pad the image.
            format (`str` or `AnnotionFormat`, *optional*, defaults to self.format):
                Format of the annotations.
            return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors):
                Type of tensors to return. If `None`, will return the list of images.
            data_format (`str` or `ChannelDimension`, *optional*, defaults to self.data_format):
                The channel dimension format of the image. If not provided, it will be the same as the input image.
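
        Example (a minimal illustrative sketch, not from the original docstring; the dummy image and the single
        COCO-style annotation are made up):

        ```python
        >>> import numpy as np
        >>> from transformers import ConditionalDetrImageProcessor

        >>> image_processor = ConditionalDetrImageProcessor()
        >>> image = np.zeros((480, 640, 3), dtype=np.uint8)
        >>> annotations = {
        ...     "image_id": 0,
        ...     "annotations": [{"bbox": [10, 20, 50, 60], "category_id": 1, "area": 3000.0, "iscrowd": 0}],
        ... }
        >>> encoding = image_processor.preprocess(image, annotations=annotations, return_tensors="pt")
        >>> pixel_values = encoding["pixel_values"]  # resized, rescaled, normalized and padded batch
        >>> labels = encoding["labels"]  # list with one dict of target tensors (boxes, class_labels, ...)
        ```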
""" if "pad_and_return_pixel_mask" in kwargs: warnings.warn( "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, " "use `do_pad` instead.", FutureWarning, ) do_pad = kwargs.pop("pad_and_return_pixel_mask") max_size = None if "max_size" in kwargs: warnings.warn( "The `max_size` argument is deprecated and will be removed in a future version, use" " `size['longest_edge']` instead.", FutureWarning, ) size = kwargs.pop("max_size") do_resize = self.do_resize if do_resize is None else do_resize size = self.size if size is None else size size = get_size_dict(size=size, max_size=max_size, default_to_square=False) resample = self.resample if resample is None else resample do_rescale = self.do_rescale if do_rescale is None else do_rescale rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor do_normalize = self.do_normalize if do_normalize is None else do_normalize image_mean = self.image_mean if image_mean is None else image_mean image_std = self.image_std if image_std is None else image_std do_pad = self.do_pad if do_pad is None else do_pad format = self.format if format is None else format if do_resize is not None and size is None: raise ValueError("Size and max_size must be specified if do_resize is True.") if do_rescale is not None and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize is not None and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") images = make_list_of_images(images) if annotations is not None and isinstance(annotations, dict): annotations = [annotations] if annotations is not None and len(images) != len(annotations): raise ValueError( f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match." ) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) format = AnnotionFormat(format) if annotations is not None: if format == AnnotionFormat.COCO_DETECTION and not valid_coco_detection_annotations(annotations): raise ValueError( "Invalid COCO detection annotations. Annotations must a dict (single image) of list of dicts" "(batch of images) with the following keys: `image_id` and `annotations`, with the latter " "being a list of annotations in the COCO format." ) elif format == AnnotionFormat.COCO_PANOPTIC and not valid_coco_panoptic_annotations(annotations): raise ValueError( "Invalid COCO panoptic annotations. Annotations must a dict (single image) of list of dicts " "(batch of images) with the following keys: `image_id`, `file_name` and `segments_info`, with " "the latter being a list of annotations in the COCO format." ) elif format not in SUPPORTED_ANNOTATION_FORMATS: raise ValueError( f"Unsupported annotation format: {format} must be one of {SUPPORTED_ANNOTATION_FORMATS}" ) if ( masks_path is not None and format == AnnotionFormat.COCO_PANOPTIC and not isinstance(masks_path, (pathlib.Path, str)) ): raise ValueError( "The path to the directory containing the mask PNG files should be provided as a" f" `pathlib.Path` or string object, but is {type(masks_path)} instead." 
) # All transformations expect numpy arrays images = [to_numpy_array(image) for image in images] # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image) if annotations is not None: prepared_images = [] prepared_annotations = [] for image, target in zip(images, annotations): target = self.prepare_annotation( image, target, format, return_segmentation_masks=return_segmentation_masks, masks_path=masks_path ) prepared_images.append(image) prepared_annotations.append(target) images = prepared_images annotations = prepared_annotations del prepared_images, prepared_annotations # transformations if do_resize: if annotations is not None: resized_images, resized_annotations = [], [] for image, target in zip(images, annotations): orig_size = get_image_size(image) resized_image = self.resize(image, size=size, max_size=max_size, resample=resample) resized_annotation = self.resize_annotation(target, orig_size, get_image_size(resized_image)) resized_images.append(resized_image) resized_annotations.append(resized_annotation) images = resized_images annotations = resized_annotations del resized_images, resized_annotations else: images = [self.resize(image, size=size, resample=resample) for image in images] if do_rescale: images = [self.rescale(image, rescale_factor) for image in images] if do_normalize: images = [self.normalize(image, image_mean, image_std) for image in images] if annotations is not None: annotations = [ self.normalize_annotation(annotation, get_image_size(image)) for annotation, image in zip(annotations, images) ] if do_pad: # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...} data = self.pad(images, return_pixel_mask=True, data_format=data_format) else: images = [to_channel_dimension_format(image, data_format) for image in images] data = {"pixel_values": images} encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) if annotations is not None: encoded_inputs["labels"] = [ BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations ] return encoded_inputs # POSTPROCESSING METHODS - TODO: add support for other frameworks def post_process(self, outputs, target_sizes): """ Converts the output of [`ConditionalDetrForObjectDetection`] into the format expected by the COCO api. Only supports PyTorch. Args: outputs ([`ConditionalDetrObjectDetectionOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): Tensor containing the size (h, w) of each image of the batch. For evaluation, this must be the original image size (before any data augmentation). For visualization, this should be the image size after data augment, but before padding. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. 
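
        Example (a minimal illustrative sketch, not from the original docstring, using the recommended
        `post_process_object_detection` replacement; the `microsoft/conditional-detr-resnet-50` checkpoint is public
        and `cats.png` stands in for any local image):

        ```python
        >>> import torch
        >>> from PIL import Image
        >>> from transformers import AutoImageProcessor, ConditionalDetrForObjectDetection

        >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        >>> model = ConditionalDetrForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50")

        >>> image = Image.open("cats.png")
        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> with torch.no_grad():
        ...     outputs = model(**inputs)

        >>> target_sizes = torch.tensor([image.size[::-1]])  # (height, width) of the original image
        >>> results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)
        >>> boxes = results[0]["boxes"]  # absolute (x0, y0, x1, y1) boxes above the score threshold
        ```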
""" warnings.warn( "`post_process` is deprecated and will be removed in v5 of Transformers, please use" " `post_process_object_detection`", FutureWarning, ) out_logits, out_bbox = outputs.logits, outputs.pred_boxes if len(out_logits) != len(target_sizes): raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits") if target_sizes.shape[1] != 2: raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") prob = out_logits.sigmoid() topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 300, dim=1) scores = topk_values topk_boxes = torch_int_div(topk_indexes, out_logits.shape[2]) labels = topk_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) # and from relative [0, 1] to absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)] return results # Copied from transformers.models.deformable_detr.image_processing_deformable_detr.DeformableDetrImageProcessor.post_process_object_detection with DeformableDetr->ConditionalDetr def post_process_object_detection( self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None ): """ Converts the raw output of [`ConditionalDetrForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch. Args: outputs ([`DetrObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. 
""" out_logits, out_bbox = outputs.logits, outputs.pred_boxes if target_sizes is not None: if len(out_logits) != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) prob = out_logits.sigmoid() topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1) scores = topk_values topk_boxes = torch_int_div(topk_indexes, out_logits.shape[2]) labels = topk_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) # and from relative [0, 1] to absolute [0, height] coordinates if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] for s, l, b in zip(scores, labels, boxes): score = s[s > threshold] label = l[s > threshold] box = b[s > threshold] results.append({"scores": score, "labels": label, "boxes": box}) return results # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_semantic_segmentation with Detr->ConditionalDetr def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple[int, int]] = None): """ Converts the output of [`ConditionalDetrForSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): Raw outputs of the model. target_sizes (`List[Tuple[int, int]]`, *optional*): A list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If unset, predictions will not be resized. Returns: `List[torch.Tensor]`: A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id. 
""" class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] # Remove the null class `[..., :-1]` masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Semantic segmentation logits of shape (batch_size, num_classes, height, width) segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) batch_size = class_queries_logits.shape[0] # Resize logits and compute semantic segmentation maps if target_sizes is not None: if batch_size != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) semantic_segmentation = [] for idx in range(batch_size): resized_logits = nn.functional.interpolate( segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False ) semantic_map = resized_logits[0].argmax(dim=0) semantic_segmentation.append(semantic_map) else: semantic_segmentation = segmentation.argmax(dim=1) semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_instance_segmentation with Detr->ConditionalDetr def post_process_instance_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, target_sizes: Optional[List[Tuple[int, int]]] = None, return_coco_annotation: Optional[bool] = False, ) -> List[Dict]: """ Converts the output of [`ConditionalDetrForSegmentation`] into instance segmentation predictions. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If unset, predictions will not be resized. return_coco_annotation (`bool`, *optional*): Defaults to `False`. If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id` or `List[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to `True`. Set to `None` if no mask if found above `threshold`. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- An integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **score** -- Prediction score of segment with `segment_id`. 
""" class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) # No mask found if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=[], target_size=target_size, ) # Return segmentation map in run-length encoding (RLE) format if return_coco_annotation: segmentation = convert_segmentation_to_rle(segmentation) results.append({"segmentation": segmentation, "segments_info": segments}) return results # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.post_process_panoptic_segmentation with Detr->ConditionalDetr def post_process_panoptic_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_sizes: Optional[List[Tuple[int, int]]] = None, ) -> List[Dict]: """ Converts the output of [`ConditionalDetrForSegmentation`] into image panoptic segmentation predictions. Only supports PyTorch. Args: outputs ([`ConditionalDetrForSegmentation`]): The outputs from [`ConditionalDetrForSegmentation`]. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. label_ids_to_fuse (`Set[int]`, *optional*): The labels in this state will have all their instances be fused together. For instance we could say there can only be one sky in an image, but several persons, so the label ID for sky would be in that set, but not the one for person. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction in batch. If unset, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id` or `None` if no mask if found above `threshold`. 
If `target_sizes` is specified, segmentation is resized to the corresponding `target_sizes` entry. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- an integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. Multiple instances of the same class / label were fused and assigned a single `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ if label_ids_to_fuse is None: warnings.warn("`label_ids_to_fuse` unset. No instance will be fused.") label_ids_to_fuse = set() class_queries_logits = outputs.logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.pred_masks # [batch_size, num_queries, height, width] batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) # No mask found if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=label_ids_to_fuse, target_size=target_size, ) results.append({"segmentation": segmentation, "segments_info": segments}) return results
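The post-processing helpers above are meant to be called on the raw outputs of the corresponding model. The snippet below is a minimal usage sketch, not part of the file above: it assumes the public `transformers` API (`AutoImageProcessor`, `ConditionalDetrForObjectDetection`) and the `microsoft/conditional-detr-resnet-50` checkpoint referenced elsewhere in this document; the image URL is only an illustrative example.

# Hedged usage sketch: preprocess an image, run the model, then convert raw outputs
# to boxes/labels with post_process_object_detection.
import torch
import requests
from PIL import Image
from transformers import AutoImageProcessor, ConditionalDetrForObjectDetection

url = "http://images.cocodataset.org/val2017/000000039769.jpg"  # example image
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
model = ConditionalDetrForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50")

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# target_sizes carries the original (height, width) so boxes are rescaled to the input image
target_sizes = torch.tensor([image.size[::-1]])
results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), [round(c, 2) for c in box.tolist()])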
1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/conditional_detr/modeling_conditional_detr.py
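For reference, the pattern this PR targets can be sketched as follows. This is an illustrative example only (the tensor values and variable names are made up, not the actual diff); it shows the `torch.div(..., rounding_mode="floor")` form that the `torch_int_div` helper imported from `transformers.pytorch_utils` in the file below wraps.

# Hedged sketch of the __floordiv__ replacement: integer tensor division with `//`
# emits the deprecation warning; torch.div with rounding_mode="floor" is the
# recommended, warning-free equivalent.
import torch

topk_indexes = torch.tensor([[7, 12, 23]])  # made-up example values
num_classes = 5

old_style = topk_indexes // num_classes                                   # deprecated __floordiv__ path
new_style = torch.div(topk_indexes, num_classes, rounding_mode="floor")   # replacement used by the PR

assert torch.equal(old_style, new_style)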
# coding=utf-8 # Copyright 2022 Microsoft Research Asia and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Conditional DETR model.""" import math import random from dataclasses import dataclass from typing import Dict, List, Optional, Tuple import torch from torch import Tensor, nn from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions, Seq2SeqModelOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import torch_int_div from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, is_scipy_available, is_timm_available, is_vision_available, logging, replace_return_docstrings, requires_backends, ) from ..auto import AutoBackbone from .configuration_conditional_detr import ConditionalDetrConfig if is_scipy_available(): from scipy.optimize import linear_sum_assignment if is_timm_available(): from timm import create_model if is_vision_available(): from transformers.image_transforms import center_to_corners_format logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "ConditionalDetrConfig" _CHECKPOINT_FOR_DOC = "microsoft/conditional-detr-resnet-50" CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = [ "microsoft/conditional-detr-resnet-50", # See all Conditional DETR models at https://huggingface.co/models?filter=conditional_detr ] @dataclass class ConditionalDetrDecoderOutput(BaseModelOutputWithCrossAttentions): """ Base class for outputs of the Conditional DETR decoder. This class adds one attribute to BaseModelOutputWithCrossAttentions, namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. This is useful when training the model with auxiliary decoding losses. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`): Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. """ intermediate_hidden_states: Optional[torch.FloatTensor] = None reference_points: Optional[Tuple[torch.FloatTensor]] = None @dataclass class ConditionalDetrModelOutput(Seq2SeqModelOutput): """ Base class for outputs of the Conditional DETR encoder-decoder model. This class adds one attribute to Seq2SeqModelOutput, namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. This is useful when training the model with auxiliary decoding losses. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. 
encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, sequence_length, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`): Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. """ intermediate_hidden_states: Optional[torch.FloatTensor] = None reference_points: Optional[Tuple[torch.FloatTensor]] = None @dataclass # Copied from transformers.models.detr.modeling_detr.DetrObjectDetectionOutput with Detr->ConditionalDetr class ConditionalDetrObjectDetectionOutput(ModelOutput): """ Output type of [`ConditionalDetrForObjectDetection`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)): Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss. loss_dict (`Dict`, *optional*): A dictionary containing the individual losses. Useful for logging. logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): Classification logits (including no-object) for all queries. pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). You can use [`~ConditionalDetrImageProcessor.post_process_object_detection`] to retrieve the unnormalized bounding boxes. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxilary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and `pred_boxes`) for each decoder layer. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the decoder of the model. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. 
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None loss_dict: Optional[Dict] = None logits: torch.FloatTensor = None pred_boxes: torch.FloatTensor = None auxiliary_outputs: Optional[List[Dict]] = None last_hidden_state: Optional[torch.FloatTensor] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass # Copied from transformers.models.detr.modeling_detr.DetrSegmentationOutput with Detr->ConditionalDetr class ConditionalDetrSegmentationOutput(ModelOutput): """ Output type of [`ConditionalDetrForSegmentation`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)): Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss. loss_dict (`Dict`, *optional*): A dictionary containing the individual losses. Useful for logging. logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): Classification logits (including no-object) for all queries. pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). You can use [`~ConditionalDetrImageProcessor.post_process_object_detection`] to retrieve the unnormalized bounding boxes. pred_masks (`torch.FloatTensor` of shape `(batch_size, num_queries, height/4, width/4)`): Segmentation masks logits for all queries. 
See also [`~ConditionalDetrImageProcessor.post_process_semantic_segmentation`] or [`~ConditionalDetrImageProcessor.post_process_instance_segmentation`] [`~ConditionalDetrImageProcessor.post_process_panoptic_segmentation`] to evaluate semantic, instance and panoptic segmentation masks respectively. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and `pred_boxes`) for each decoder layer. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the decoder of the model. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" loss: Optional[torch.FloatTensor] = None loss_dict: Optional[Dict] = None logits: torch.FloatTensor = None pred_boxes: torch.FloatTensor = None pred_masks: torch.FloatTensor = None auxiliary_outputs: Optional[List[Dict]] = None last_hidden_state: Optional[torch.FloatTensor] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None # Copied from transformers.models.detr.modeling_detr.DetrFrozenBatchNorm2d with Detr->ConditionalDetr class ConditionalDetrFrozenBatchNorm2d(nn.Module): """ BatchNorm2d where the batch statistics and the affine parameters are fixed. Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than torchvision.models.resnet[18,34,50,101] produce nans. """ def __init__(self, n): super().__init__() self.register_buffer("weight", torch.ones(n)) self.register_buffer("bias", torch.zeros(n)) self.register_buffer("running_mean", torch.zeros(n)) self.register_buffer("running_var", torch.ones(n)) def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ): num_batches_tracked_key = prefix + "num_batches_tracked" if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] super()._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ) def forward(self, x): # move reshapes to the beginning # to make it user-friendly weight = self.weight.reshape(1, -1, 1, 1) bias = self.bias.reshape(1, -1, 1, 1) running_var = self.running_var.reshape(1, -1, 1, 1) running_mean = self.running_mean.reshape(1, -1, 1, 1) epsilon = 1e-5 scale = weight * (running_var + epsilon).rsqrt() bias = bias - running_mean * scale return x * scale + bias # Copied from transformers.models.detr.modeling_detr.replace_batch_norm with Detr->ConditionalDetr def replace_batch_norm(m, name=""): for attr_str in dir(m): target_attr = getattr(m, attr_str) if isinstance(target_attr, nn.BatchNorm2d): frozen = ConditionalDetrFrozenBatchNorm2d(target_attr.num_features) bn = getattr(m, attr_str) frozen.weight.data.copy_(bn.weight) frozen.bias.data.copy_(bn.bias) frozen.running_mean.data.copy_(bn.running_mean) frozen.running_var.data.copy_(bn.running_var) setattr(m, attr_str, frozen) for n, ch in m.named_children(): replace_batch_norm(ch, n) # Copied from transformers.models.detr.modeling_detr.DetrConvEncoder class ConditionalDetrConvEncoder(nn.Module): """ Convolutional backbone, using either the AutoBackbone API or one from the timm library. nn.BatchNorm2d layers are replaced by DetrFrozenBatchNorm2d as defined above. 
""" def __init__(self, config): super().__init__() self.config = config if config.use_timm_backbone: requires_backends(self, ["timm"]) kwargs = {} if config.dilation: kwargs["output_stride"] = 16 backbone = create_model( config.backbone, pretrained=config.use_pretrained_backbone, features_only=True, out_indices=(1, 2, 3, 4), in_chans=config.num_channels, **kwargs, ) else: backbone = AutoBackbone.from_config(config.backbone_config) # replace batch norm by frozen batch norm with torch.no_grad(): replace_batch_norm(backbone) self.model = backbone self.intermediate_channel_sizes = ( self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels ) backbone_model_type = config.backbone if config.use_timm_backbone else config.backbone_config.model_type if "resnet" in backbone_model_type: for name, parameter in self.model.named_parameters(): if config.use_timm_backbone: if "layer2" not in name and "layer3" not in name and "layer4" not in name: parameter.requires_grad_(False) else: if "stage.1" not in name and "stage.2" not in name and "stage.3" not in name: parameter.requires_grad_(False) def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): # send pixel_values through the model to get list of feature maps features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps out = [] for feature_map in features: # downsample pixel_mask to match shape of corresponding feature_map mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0] out.append((feature_map, mask)) return out # Copied from transformers.models.detr.modeling_detr.DetrConvModel with Detr->ConditionalDetr class ConditionalDetrConvModel(nn.Module): """ This module adds 2D position embeddings to all intermediate feature maps of the convolutional encoder. """ def __init__(self, conv_encoder, position_embedding): super().__init__() self.conv_encoder = conv_encoder self.position_embedding = position_embedding def forward(self, pixel_values, pixel_mask): # send pixel_values and pixel_mask through backbone to get list of (feature_map, pixel_mask) tuples out = self.conv_encoder(pixel_values, pixel_mask) pos = [] for feature_map, mask in out: # position encoding pos.append(self.position_embedding(feature_map, mask).to(feature_map.dtype)) return out, pos # Copied from transformers.models.detr.modeling_detr._expand_mask with Detr->ConditionalDetr def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, target_len: Optional[int] = None): """ Expands attention_mask from `[batch_size, seq_len]` to `[batch_size, 1, target_seq_len, source_seq_len]`. """ batch_size, source_len = mask.size() target_len = target_len if target_len is not None else source_len expanded_mask = mask[:, None, None, :].expand(batch_size, 1, target_len, source_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min) # Copied from transformers.models.detr.modeling_detr.DetrSinePositionEmbedding with Detr->ConditionalDetr class ConditionalDetrSinePositionEmbedding(nn.Module): """ This is a more standard version of the position embedding, very similar to the one used by the Attention is all you need paper, generalized to work on images. 
""" def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None): super().__init__() self.embedding_dim = embedding_dim self.temperature = temperature self.normalize = normalize if scale is not None and normalize is False: raise ValueError("normalize should be True if scale is passed") if scale is None: scale = 2 * math.pi self.scale = scale def forward(self, pixel_values, pixel_mask): if pixel_mask is None: raise ValueError("No pixel mask provided") y_embed = pixel_mask.cumsum(1, dtype=torch.float32) x_embed = pixel_mask.cumsum(2, dtype=torch.float32) if self.normalize: y_embed = y_embed / (y_embed[:, -1:, :] + 1e-6) * self.scale x_embed = x_embed / (x_embed[:, :, -1:] + 1e-6) * self.scale dim_t = torch.arange(self.embedding_dim, dtype=torch.float32, device=pixel_values.device) dim_t = self.temperature ** (2 * torch_int_div(dim_t, 2) / self.embedding_dim) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) return pos # Copied from transformers.models.detr.modeling_detr.DetrLearnedPositionEmbedding with Detr->ConditionalDetr class ConditionalDetrLearnedPositionEmbedding(nn.Module): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, embedding_dim=256): super().__init__() self.row_embeddings = nn.Embedding(50, embedding_dim) self.column_embeddings = nn.Embedding(50, embedding_dim) def forward(self, pixel_values, pixel_mask=None): height, width = pixel_values.shape[-2:] width_values = torch.arange(width, device=pixel_values.device) height_values = torch.arange(height, device=pixel_values.device) x_emb = self.column_embeddings(width_values) y_emb = self.row_embeddings(height_values) pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1) pos = pos.permute(2, 0, 1) pos = pos.unsqueeze(0) pos = pos.repeat(pixel_values.shape[0], 1, 1, 1) return pos # Copied from transformers.models.detr.modeling_detr.build_position_encoding with Detr->ConditionalDetr def build_position_encoding(config): n_steps = config.d_model // 2 if config.position_embedding_type == "sine": # TODO find a better way of exposing other arguments position_embedding = ConditionalDetrSinePositionEmbedding(n_steps, normalize=True) elif config.position_embedding_type == "learned": position_embedding = ConditionalDetrLearnedPositionEmbedding(n_steps) else: raise ValueError(f"Not supported {config.position_embedding_type}") return position_embedding # function to generate sine positional embedding for 2d coordinates def gen_sine_position_embeddings(pos_tensor): scale = 2 * math.pi dim_t = torch.arange(128, dtype=torch.float32, device=pos_tensor.device) dim_t = 10000 ** (2 * (dim_t // 2) / 128) x_embed = pos_tensor[:, :, 0] * scale y_embed = pos_tensor[:, :, 1] * scale pos_x = x_embed[:, :, None] / dim_t pos_y = y_embed[:, :, None] / dim_t pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2) pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2) pos = torch.cat((pos_y, pos_x), dim=2) return pos def inverse_sigmoid(x, eps=1e-5): x = x.clamp(min=0, max=1) x1 = x.clamp(min=eps) x2 = (1 - x).clamp(min=eps) return torch.log(x1 / x2) # Copied from 
transformers.models.detr.modeling_detr.DetrAttention class DetrAttention(nn.Module): """ Multi-headed attention from 'Attention Is All You Need' paper. Here, we add position embeddings to the queries and keys (as explained in the DETR paper). """ def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {num_heads})." ) self.scaling = self.head_dim**-0.5 self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): return tensor if position_embeddings is None else tensor + position_embeddings def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_embeddings: Optional[torch.Tensor] = None, key_value_states: Optional[torch.Tensor] = None, key_value_position_embeddings: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None batch_size, target_len, embed_dim = hidden_states.size() # add position embeddings to the hidden states before projecting to queries and keys if position_embeddings is not None: hidden_states_original = hidden_states hidden_states = self.with_pos_embed(hidden_states, position_embeddings) # add key-value position embeddings to the key value states if key_value_position_embeddings is not None: key_value_states_original = key_value_states key_value_states = self.with_pos_embed(key_value_states, key_value_position_embeddings) # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, batch_size) value_states = self._shape(self.v_proj(key_value_states_original), -1, batch_size) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, batch_size) value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size) proj_shape = (batch_size * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) source_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len): raise ValueError( f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (batch_size, 1, target_len, source_len): raise ValueError( f"Attention mask should be of 
size {(batch_size, 1, target_len, source_len)}, but is" f" {attention_mask.size()}" ) attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(batch_size, target_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped class ConditionalDetrAttention(nn.Module): """ Cross-Attention used in Conditional DETR 'Conditional DETR for Fast Training Convergence' paper. The key q_proj, k_proj, v_proj are defined outside the attention. This attention allows the dim of q, k to be different to v. """ def __init__( self, embed_dim: int, out_dim: int, num_heads: int, dropout: float = 0.0, bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.out_dim = out_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {num_heads})." ) # head dimension of values self.v_head_dim = out_dim // num_heads if self.v_head_dim * num_heads != self.out_dim: raise ValueError( f"out_dim must be divisible by num_heads (got `out_dim`: {self.out_dim} and `num_heads`: {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.out_proj = nn.Linear(out_dim, out_dim, bias=bias) def _qk_shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def _v_shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.v_head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, key_states: Optional[torch.Tensor] = None, value_states: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" batch_size, target_len, _ = hidden_states.size() # get query proj query_states = hidden_states * self.scaling # get key, value proj key_states = self._qk_shape(key_states, -1, batch_size) value_states = self._v_shape(value_states, -1, batch_size) proj_shape = (batch_size * self.num_heads, -1, self.head_dim) v_proj_shape = (batch_size * self.num_heads, -1, self.v_head_dim) query_states = self._qk_shape(query_states, target_len, batch_size).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*v_proj_shape) source_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len): raise ValueError( f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (batch_size, 1, target_len, source_len): raise ValueError( f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is" f" {attention_mask.size()}" ) attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. 
# In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (batch_size * self.num_heads, target_len, self.v_head_dim): raise ValueError( f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.v_head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.v_head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(batch_size, target_len, self.out_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped # Copied from transformers.models.detr.modeling_detr.DetrEncoderLayer with DetrEncoderLayer->ConditionalDetrEncoderLayer,DetrConfig->ConditionalDetrConfig class ConditionalDetrEncoderLayer(nn.Module): def __init__(self, config: ConditionalDetrConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = DetrAttention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: torch.Tensor = None, output_attentions: bool = False, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. position_embeddings (`torch.FloatTensor`, *optional*): position embeddings, to be added to hidden_states. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_embeddings=position_embeddings, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if self.training: if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class ConditionalDetrDecoderLayer(nn.Module): def __init__(self, config: ConditionalDetrConfig): super().__init__() self.embed_dim = config.d_model d_model = config.d_model # Decoder Self-Attention projections self.sa_qcontent_proj = nn.Linear(d_model, d_model) self.sa_qpos_proj = nn.Linear(d_model, d_model) self.sa_kcontent_proj = nn.Linear(d_model, d_model) self.sa_kpos_proj = nn.Linear(d_model, d_model) self.sa_v_proj = nn.Linear(d_model, d_model) self.self_attn = ConditionalDetrAttention( embed_dim=self.embed_dim, out_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) # Decoder Cross-Attention projections self.ca_qcontent_proj = nn.Linear(d_model, d_model) self.ca_qpos_proj = nn.Linear(d_model, d_model) self.ca_kcontent_proj = nn.Linear(d_model, d_model) self.ca_kpos_proj = nn.Linear(d_model, d_model) self.ca_v_proj = nn.Linear(d_model, d_model) self.ca_qpos_sine_proj = nn.Linear(d_model, d_model) self.encoder_attn = ConditionalDetrAttention( self.embed_dim * 2, self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) self.nhead = config.decoder_attention_heads def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_embeddings: Optional[torch.Tensor] = None, query_position_embeddings: Optional[torch.Tensor] = None, query_sine_embed: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, is_first: Optional[bool] = False, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. 
position_embeddings (`torch.FloatTensor`, *optional*): position embeddings that are added to the queries and keys in the cross-attention layer. query_position_embeddings (`torch.FloatTensor`, *optional*): position embeddings that are added to the queries and keys in the self-attention layer. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states # ========== Begin of Self-Attention ============= # Apply projections here # shape: num_queries x batch_size x 256 q_content = self.sa_qcontent_proj( hidden_states ) # target is the input of the first decoder layer. zero by default. q_pos = self.sa_qpos_proj(query_position_embeddings) k_content = self.sa_kcontent_proj(hidden_states) k_pos = self.sa_kpos_proj(query_position_embeddings) v = self.sa_v_proj(hidden_states) _, num_queries, n_model = q_content.shape q = q_content + q_pos k = k_content + k_pos hidden_states, self_attn_weights = self.self_attn( hidden_states=q, attention_mask=attention_mask, key_states=k, value_states=v, output_attentions=output_attentions, ) # ============ End of Self-Attention ============= hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # ========== Begin of Cross-Attention ============= # Apply projections here # shape: num_queries x batch_size x 256 q_content = self.ca_qcontent_proj(hidden_states) k_content = self.ca_kcontent_proj(encoder_hidden_states) v = self.ca_v_proj(encoder_hidden_states) batch_size, num_queries, n_model = q_content.shape _, source_len, _ = k_content.shape k_pos = self.ca_kpos_proj(position_embeddings) # For the first decoder layer, we concatenate the positional embedding predicted from # the object query (the positional embedding) into the original query (key) in DETR. 
if is_first: q_pos = self.ca_qpos_proj(query_position_embeddings) q = q_content + q_pos k = k_content + k_pos else: q = q_content k = k_content q = q.view(batch_size, num_queries, self.nhead, n_model // self.nhead) query_sine_embed = self.ca_qpos_sine_proj(query_sine_embed) query_sine_embed = query_sine_embed.view(batch_size, num_queries, self.nhead, n_model // self.nhead) q = torch.cat([q, query_sine_embed], dim=3).view(batch_size, num_queries, n_model * 2) k = k.view(batch_size, source_len, self.nhead, n_model // self.nhead) k_pos = k_pos.view(batch_size, source_len, self.nhead, n_model // self.nhead) k = torch.cat([k, k_pos], dim=3).view(batch_size, source_len, n_model * 2) # Cross-Attention Block cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states, cross_attn_weights = self.encoder_attn( hidden_states=q, attention_mask=encoder_attention_mask, key_states=k, value_states=v, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # ============ End of Cross-Attention ============= # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs # Copied from transformers.models.detr.modeling_detr.DetrClassificationHead with Detr->ConditionalDetr class ConditionalDetrClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, hidden_states: torch.Tensor): hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states # Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead with DetrMLPPredictionHead->MLP class MLP(nn.Module): """ Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates, height and width of a bounding box w.r.t. an image. 
Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py """ def __init__(self, input_dim, hidden_dim, output_dim, num_layers): super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) def forward(self, x): for i, layer in enumerate(self.layers): x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x # Copied from transformers.models.detr.modeling_detr.DetrPreTrainedModel with Detr->ConditionalDetr class ConditionalDetrPreTrainedModel(PreTrainedModel): config_class = ConditionalDetrConfig base_model_prefix = "model" main_input_name = "pixel_values" def _init_weights(self, module): std = self.config.init_std xavier_std = self.config.init_xavier_std if isinstance(module, ConditionalDetrMHAttentionMap): nn.init.zeros_(module.k_linear.bias) nn.init.zeros_(module.q_linear.bias) nn.init.xavier_uniform_(module.k_linear.weight, gain=xavier_std) nn.init.xavier_uniform_(module.q_linear.weight, gain=xavier_std) elif isinstance(module, ConditionalDetrLearnedPositionEmbedding): nn.init.uniform_(module.row_embeddings.weight) nn.init.uniform_(module.column_embeddings.weight) if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, ConditionalDetrDecoder): module.gradient_checkpointing = value CONDITIONAL_DETR_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ConditionalDetrConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ CONDITIONAL_DETR_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConditionalDetrImageProcessor.__call__`] for details. pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, num_queries)`, *optional*): Not used by default. Can be used to mask object queries. 
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you can choose to directly pass a flattened representation of an image. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an embedded representation. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ # Copied from transformers.models.detr.modeling_detr.DetrEncoder with Detr->ConditionalDetr,DETR->ConditionalDETR class ConditionalDetrEncoder(ConditionalDetrPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`ConditionalDetrEncoderLayer`]. The encoder updates the flattened feature map through multiple self-attention layers. Small tweak for ConditionalDETR: - position_embeddings are added to the forward pass. Args: config: ConditionalDetrConfig """ def __init__(self, config: ConditionalDetrConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop self.layers = nn.ModuleList([ConditionalDetrEncoderLayer(config) for _ in range(config.encoder_layers)]) # in the original ConditionalDETR, no layernorm is used at the end of the encoder, as "normalize_before" is set to False by default # Initialize weights and apply final processing self.post_init() def forward( self, inputs_embeds=None, attention_mask=None, position_embeddings=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Flattened feature map (output of the backbone + projection layer) that is passed to the encoder. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`: - 1 for pixel features that are real (i.e. **not masked**), - 0 for pixel features that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Position embeddings that are added to the queries and keys in each self-attention layer. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. 
return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = inputs_embeds hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # expand attention_mask if attention_mask is not None: # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): # skip the layer layer_outputs = (None, None) else: # we add position_embeddings as extra input to the encoder_layer layer_outputs = encoder_layer( hidden_states, attention_mask, position_embeddings=position_embeddings, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class ConditionalDetrDecoder(ConditionalDetrPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`ConditionalDetrDecoderLayer`]. The decoder updates the query embeddings through multiple self-attention and cross-attention layers. Some small tweaks for Conditional DETR: - position_embeddings and query_position_embeddings are added to the forward pass. - if self.config.auxiliary_loss is set to True, also returns a stack of activations from all decoding layers. 
Args: config: ConditionalDetrConfig """ def __init__(self, config: ConditionalDetrConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.layers = nn.ModuleList([ConditionalDetrDecoderLayer(config) for _ in range(config.decoder_layers)]) # in Conditional DETR, the decoder uses layernorm after the last decoder layer output self.layernorm = nn.LayerNorm(config.d_model) d_model = config.d_model self.gradient_checkpointing = False # query_scale is the FFN applied on f to generate transformation T self.query_scale = MLP(d_model, d_model, d_model, 2) self.ref_point_head = MLP(d_model, d_model, 2, 2) for layer_id in range(config.decoder_layers - 1): self.layers[layer_id + 1].ca_qpos_proj = None # Initialize weights and apply final processing self.post_init() def forward( self, inputs_embeds=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings=None, query_position_embeddings=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): The query embeddings that are passed into the decoder. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on certain queries. Mask values selected in `[0, 1]`: - 1 for queries that are **not masked**, - 0 for queries that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Position embeddings that are added to the queries and keys in each cross-attention layer. query_position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`): , *optional*): Position embeddings that are added to the queries and keys in each self-attention layer. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is not None: hidden_states = inputs_embeds input_shape = inputs_embeds.size()[:-1] combined_attention_mask = None if attention_mask is not None and combined_attention_mask is not None: # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] combined_attention_mask = combined_attention_mask + _expand_mask( attention_mask, inputs_embeds.dtype, target_len=input_shape[-1] ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] encoder_attention_mask = _expand_mask( encoder_attention_mask, inputs_embeds.dtype, target_len=input_shape[-1] ) # optional intermediate hidden states intermediate = () if self.config.auxiliary_loss else None # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None reference_points_before_sigmoid = self.ref_point_head( query_position_embeddings ) # [num_queries, batch_size, 2] reference_points = reference_points_before_sigmoid.sigmoid().transpose(0, 1) obj_center = reference_points[..., :2].transpose(0, 1) # get sine embedding for the query vector query_sine_embed_before_transformation = gen_sine_position_embeddings(obj_center) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): continue if idx == 0: pos_transformation = 1 else: pos_transformation = self.query_scale(hidden_states) # apply transformation query_sine_embed = query_sine_embed_before_transformation * pos_transformation if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(decoder_layer), hidden_states, combined_attention_mask, position_embeddings, query_position_embeddings, query_sine_embed, encoder_hidden_states, encoder_attention_mask, None, None, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=combined_attention_mask, position_embeddings=position_embeddings, query_position_embeddings=query_position_embeddings, query_sine_embed=query_sine_embed, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, is_first=(idx == 0), ) hidden_states = layer_outputs[0] if self.config.auxiliary_loss: hidden_states = self.layernorm(hidden_states) intermediate += (hidden_states,) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # finally, apply layernorm hidden_states = self.layernorm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) # stack intermediate decoder activations if self.config.auxiliary_loss: 
intermediate = torch.stack(intermediate) if not return_dict: return tuple( v for v in [ hidden_states, all_hidden_states, all_self_attns, all_cross_attentions, intermediate, reference_points, ] if v is not None ) return ConditionalDetrDecoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, intermediate_hidden_states=intermediate, reference_points=reference_points, ) @add_start_docstrings( """ The bare Conditional DETR Model (consisting of a backbone and encoder-decoder Transformer) outputting raw hidden-states without any specific head on top. """, CONDITIONAL_DETR_START_DOCSTRING, ) class ConditionalDetrModel(ConditionalDetrPreTrainedModel): def __init__(self, config: ConditionalDetrConfig): super().__init__(config) # Create backbone + positional encoding backbone = ConditionalDetrConvEncoder(config) position_embeddings = build_position_encoding(config) self.backbone = ConditionalDetrConvModel(backbone, position_embeddings) # Create projection layer self.input_projection = nn.Conv2d(backbone.intermediate_channel_sizes[-1], config.d_model, kernel_size=1) self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model) self.encoder = ConditionalDetrEncoder(config) self.decoder = ConditionalDetrDecoder(config) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def freeze_backbone(self): for name, param in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(False) def unfreeze_backbone(self): for name, param in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(True) @add_start_docstrings_to_model_forward(CONDITIONAL_DETR_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ConditionalDetrModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values, pixel_mask=None, decoder_attention_mask=None, encoder_outputs=None, inputs_embeds=None, decoder_inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Returns: Examples: ```python >>> from transformers import AutoImageProcessor, AutoModel >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50") >>> model = AutoModel.from_pretrained("microsoft/conditional-detr-resnet-50") >>> # prepare image for the model >>> inputs = image_processor(images=image, return_tensors="pt") >>> # forward pass >>> outputs = model(**inputs) >>> # the last hidden states are the final query embeddings of the Transformer decoder >>> # these are of shape (batch_size, num_queries, hidden_size) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 300, 256] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, num_channels, height, width = pixel_values.shape device = pixel_values.device if pixel_mask is None: pixel_mask = torch.ones(((batch_size, height, width)), device=device) # First, sent pixel_values + pixel_mask through Backbone to obtain the 
features
        # pixel_values should be of shape (batch_size, num_channels, height, width)
        # pixel_mask should be of shape (batch_size, height, width)
        features, position_embeddings_list = self.backbone(pixel_values, pixel_mask)

        # get final feature map and downsampled mask
        feature_map, mask = features[-1]

        if mask is None:
            raise ValueError("Backbone does not return downsampled pixel mask")

        # Second, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default)
        projected_feature_map = self.input_projection(feature_map)

        # Third, flatten the feature map + position embeddings of shape NxCxHxW to NxCxHW, and permute it to NxHWxC
        # In other words, turn their shape into (batch_size, sequence_length, hidden_size)
        flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1)
        position_embeddings = position_embeddings_list[-1].flatten(2).permute(0, 2, 1)

        flattened_mask = mask.flatten(1)

        # Fourth, send flattened_features + flattened_mask + position embeddings through encoder
        # flattened_features is a Tensor of shape (batch_size, height*width, hidden_size)
        # flattened_mask is a Tensor of shape (batch_size, height*width)
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                inputs_embeds=flattened_features,
                attention_mask=flattened_mask,
                position_embeddings=position_embeddings,
                output_attentions=output_attentions,
                output_hidden_states=output_hidden_states,
                return_dict=return_dict,
            )
        # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True
        elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):
            encoder_outputs = BaseModelOutput(
                last_hidden_state=encoder_outputs[0],
                hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )

        # Fifth, send query embeddings + position embeddings through the decoder (which is conditioned on the encoder output)
        query_position_embeddings = self.query_position_embeddings.weight.unsqueeze(0).repeat(batch_size, 1, 1)
        queries = torch.zeros_like(query_position_embeddings)

        # decoder outputs consist of (dec_features, dec_hidden, dec_attn)
        decoder_outputs = self.decoder(
            inputs_embeds=queries,
            attention_mask=None,
            position_embeddings=position_embeddings,
            query_position_embeddings=query_position_embeddings,
            encoder_hidden_states=encoder_outputs[0],
            encoder_attention_mask=flattened_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        if not return_dict:
            return decoder_outputs + encoder_outputs

        return ConditionalDetrModelOutput(
            last_hidden_state=decoder_outputs.last_hidden_state,
            decoder_hidden_states=decoder_outputs.hidden_states,
            decoder_attentions=decoder_outputs.attentions,
            cross_attentions=decoder_outputs.cross_attentions,
            encoder_last_hidden_state=encoder_outputs.last_hidden_state,
            encoder_hidden_states=encoder_outputs.hidden_states,
            encoder_attentions=encoder_outputs.attentions,
            intermediate_hidden_states=decoder_outputs.intermediate_hidden_states,
            reference_points=decoder_outputs.reference_points,
        )


@add_start_docstrings(
    """
    CONDITIONAL_DETR Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on
    top, for tasks such as COCO detection.
""", CONDITIONAL_DETR_START_DOCSTRING, ) class ConditionalDetrForObjectDetection(ConditionalDetrPreTrainedModel): def __init__(self, config: ConditionalDetrConfig): super().__init__(config) # CONDITIONAL DETR encoder-decoder model self.model = ConditionalDetrModel(config) # Object detection heads self.class_labels_classifier = nn.Linear( config.d_model, config.num_labels ) # We add one for the "no object" class self.bbox_predictor = ConditionalDetrMLPPredictionHead( input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3 ) # Initialize weights and apply final processing self.post_init() # taken from https://github.com/Atten4Vis/conditionalDETR/blob/master/models/conditional_detr.py @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list. return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] @add_start_docstrings_to_model_forward(CONDITIONAL_DETR_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ConditionalDetrObjectDetectionOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values, pixel_mask=None, decoder_attention_mask=None, encoder_outputs=None, inputs_embeds=None, decoder_inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (`List[Dict]` of len `(batch_size,)`, *optional*): Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`. Returns: Examples: ```python >>> from transformers import AutoImageProcessor, AutoModelForObjectDetection >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50") >>> model = AutoModelForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50") >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> # convert outputs (bounding boxes and class logits) to COCO API >>> target_sizes = torch.tensor([image.size[::-1]]) >>> results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[ ... 0 ... ] >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... box = [round(i, 2) for i in box.tolist()] ... print( ... f"Detected {model.config.id2label[label.item()]} with confidence " ... f"{round(score.item(), 3)} at location {box}" ... 
) Detected remote with confidence 0.833 at location [38.31, 72.1, 177.63, 118.45] Detected cat with confidence 0.831 at location [9.2, 51.38, 321.13, 469.0] Detected cat with confidence 0.804 at location [340.3, 16.85, 642.93, 370.95] Detected remote with confidence 0.683 at location [334.48, 73.49, 366.37, 190.01] Detected couch with confidence 0.535 at location [0.52, 1.19, 640.35, 475.1] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict # First, sent images through CONDITIONAL_DETR base model to obtain encoder + decoder outputs outputs = self.model( pixel_values, pixel_mask=pixel_mask, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] # class logits + predicted bounding boxes logits = self.class_labels_classifier(sequence_output) reference = outputs.reference_points if return_dict else outputs[-1] reference_before_sigmoid = inverse_sigmoid(reference).transpose(0, 1) outputs_coords = [] hs = sequence_output tmp = self.bbox_predictor(hs) tmp[..., :2] += reference_before_sigmoid pred_boxes = tmp.sigmoid() # pred_boxes = self.bbox_predictor(sequence_output).sigmoid() loss, loss_dict, auxiliary_outputs = None, None, None if labels is not None: # First: create the matcher matcher = ConditionalDetrHungarianMatcher( class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost ) # Second: create the criterion losses = ["labels", "boxes", "cardinality"] criterion = ConditionalDetrLoss( matcher=matcher, num_classes=self.config.num_labels, focal_alpha=self.config.focal_alpha, losses=losses, ) criterion.to(self.device) # Third: compute the losses, based on outputs and labels outputs_loss = {} outputs_loss["logits"] = logits outputs_loss["pred_boxes"] = pred_boxes if self.config.auxiliary_loss: intermediate = outputs.intermediate_hidden_states if return_dict else outputs[4] outputs_class = self.class_labels_classifier(intermediate) for lvl in range(hs.shape[0]): tmp = self.bbox_predictor(hs[lvl]) tmp[..., :2] += reference_before_sigmoid outputs_coord = tmp.sigmoid() outputs_coords.append(outputs_coord) outputs_coord = torch.stack(outputs_coords) auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord) outputs_loss["auxiliary_outputs"] = auxiliary_outputs loss_dict = criterion(outputs_loss, labels) # Fourth: compute total loss, as a weighted sum of the various losses weight_dict = {"loss_ce": self.config.cls_loss_coefficient, "loss_bbox": self.config.bbox_loss_coefficient} weight_dict["loss_giou"] = self.config.giou_loss_coefficient if self.config.auxiliary_loss: aux_weight_dict = {} for i in range(self.config.decoder_layers - 1): aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) weight_dict.update(aux_weight_dict) loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) if not return_dict: if auxiliary_outputs is not None: output = (logits, pred_boxes) + auxiliary_outputs + outputs else: output = (logits, pred_boxes) + outputs return ((loss, loss_dict) + output) if loss is not None else output return ConditionalDetrObjectDetectionOutput( loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, auxiliary_outputs=auxiliary_outputs, last_hidden_state=outputs.last_hidden_state, 
decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( """ CONDITIONAL_DETR Model (consisting of a backbone and encoder-decoder Transformer) with a segmentation head on top, for tasks such as COCO panoptic. """, CONDITIONAL_DETR_START_DOCSTRING, ) class ConditionalDetrForSegmentation(ConditionalDetrPreTrainedModel): def __init__(self, config: ConditionalDetrConfig): super().__init__(config) # object detection model self.conditional_detr = ConditionalDetrForObjectDetection(config) # segmentation head hidden_size, number_of_heads = config.d_model, config.encoder_attention_heads intermediate_channel_sizes = self.conditional_detr.model.backbone.conv_encoder.intermediate_channel_sizes self.mask_head = ConditionalDetrMaskHeadSmallConv( hidden_size + number_of_heads, intermediate_channel_sizes[::-1][-3:], hidden_size ) self.bbox_attention = ConditionalDetrMHAttentionMap( hidden_size, hidden_size, number_of_heads, dropout=0.0, std=config.init_xavier_std ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(CONDITIONAL_DETR_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ConditionalDetrSegmentationOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values, pixel_mask=None, decoder_attention_mask=None, encoder_outputs=None, inputs_embeds=None, decoder_inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (`List[Dict]` of len `(batch_size,)`, *optional*): Labels for computing the bipartite matching loss, DICE/F-1 loss and Focal loss. List of dicts, each dictionary containing at least the following 3 keys: 'class_labels', 'boxes' and 'masks' (the class labels, bounding boxes and segmentation masks of an image in the batch respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes in the image,)`, the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)` and the masks a `torch.FloatTensor` of shape `(number of bounding boxes in the image, height, width)`. Returns: Examples: ```python >>> import io >>> import requests >>> from PIL import Image >>> import torch >>> import numpy >>> from transformers import ( ... AutoImageProcessor, ... ConditionalDetrConfig, ... ConditionalDetrForSegmentation, ... 
) >>> from transformers.image_transforms import rgb_to_id >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50") >>> # randomly initialize all weights of the model >>> config = ConditionalDetrConfig() >>> model = ConditionalDetrForSegmentation(config) >>> # prepare image for the model >>> inputs = image_processor(images=image, return_tensors="pt") >>> # forward pass >>> outputs = model(**inputs) >>> # Use the `post_process_panoptic_segmentation` method of the `image_processor` to retrieve post-processed panoptic segmentation maps >>> # Segmentation results are returned as a list of dictionaries >>> result = image_processor.post_process_panoptic_segmentation(outputs, target_sizes=[(300, 500)]) >>> # A tensor of shape (height, width) where each value denotes a segment id, filled with -1 if no segment is found >>> panoptic_seg = result[0]["segmentation"] >>> # Get prediction score and segment_id to class_id mapping of each segment >>> panoptic_segments_info = result[0]["segments_info"] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, num_channels, height, width = pixel_values.shape device = pixel_values.device if pixel_mask is None: pixel_mask = torch.ones((batch_size, height, width), device=device) # First, get list of feature maps and position embeddings features, position_embeddings_list = self.conditional_detr.model.backbone(pixel_values, pixel_mask=pixel_mask) # Second, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default) feature_map, mask = features[-1] batch_size, num_channels, height, width = feature_map.shape projected_feature_map = self.conditional_detr.model.input_projection(feature_map) # Third, flatten the feature map + position embeddings of shape NxCxHxW to NxCxHW, and permute it to NxHWxC # In other words, turn their shape into (batch_size, sequence_length, hidden_size) flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1) position_embeddings = position_embeddings_list[-1].flatten(2).permute(0, 2, 1) flattened_mask = mask.flatten(1) # Fourth, sent flattened_features + flattened_mask + position embeddings through encoder # flattened_features is a Tensor of shape (batch_size, heigth*width, hidden_size) # flattened_mask is a Tensor of shape (batch_size, heigth*width) if encoder_outputs is None: encoder_outputs = self.conditional_detr.model.encoder( inputs_embeds=flattened_features, attention_mask=flattened_mask, position_embeddings=position_embeddings, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # Fifth, sent query embeddings + position embeddings through the decoder (which is conditioned on the encoder output) query_position_embeddings = self.conditional_detr.model.query_position_embeddings.weight.unsqueeze(0).repeat( batch_size, 1, 1 ) queries = torch.zeros_like(query_position_embeddings) # decoder outputs consists of (dec_features, dec_hidden, dec_attn) 
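        # Clarifying note: the segmentation head reuses the detection model's encoder/decoder here;
        # as in ConditionalDetrModel.forward, the decoder is run on zero-initialized queries together
        # with the learned query position embeddings.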
decoder_outputs = self.conditional_detr.model.decoder( inputs_embeds=queries, attention_mask=None, position_embeddings=position_embeddings, query_position_embeddings=query_position_embeddings, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=flattened_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] # Sixth, compute logits, pred_boxes and pred_masks logits = self.conditional_detr.class_labels_classifier(sequence_output) pred_boxes = self.conditional_detr.bbox_predictor(sequence_output).sigmoid() memory = encoder_outputs[0].permute(0, 2, 1).view(batch_size, self.config.d_model, height, width) mask = flattened_mask.view(batch_size, height, width) # FIXME h_boxes takes the last one computed, keep this in mind # important: we need to reverse the mask, since in the original implementation the mask works reversed # bbox_mask is of shape (batch_size, num_queries, number_of_attention_heads in bbox_attention, height/32, width/32) bbox_mask = self.bbox_attention(sequence_output, memory, mask=~mask) seg_masks = self.mask_head(projected_feature_map, bbox_mask, [features[2][0], features[1][0], features[0][0]]) pred_masks = seg_masks.view( batch_size, self.conditional_detr.config.num_queries, seg_masks.shape[-2], seg_masks.shape[-1] ) loss, loss_dict, auxiliary_outputs = None, None, None if labels is not None: # First: create the matcher matcher = ConditionalDetrHungarianMatcher( class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost ) # Second: create the criterion losses = ["labels", "boxes", "cardinality", "masks"] criterion = ConditionalDetrLoss( matcher=matcher, num_classes=self.config.num_labels, focal_alpha=self.config.focal_alpha, losses=losses, ) criterion.to(self.device) # Third: compute the losses, based on outputs and labels outputs_loss = {} outputs_loss["logits"] = logits outputs_loss["pred_boxes"] = pred_boxes outputs_loss["pred_masks"] = pred_masks if self.config.auxiliary_loss: intermediate = decoder_outputs.intermediate_hidden_states if return_dict else decoder_outputs[-1] outputs_class = self.class_labels_classifier(intermediate) outputs_coord = self.bbox_predictor(intermediate).sigmoid() auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord) outputs_loss["auxiliary_outputs"] = auxiliary_outputs loss_dict = criterion(outputs_loss, labels) # Fourth: compute total loss, as a weighted sum of the various losses weight_dict = {"loss_ce": 1, "loss_bbox": self.config.bbox_loss_coefficient} weight_dict["loss_giou"] = self.config.giou_loss_coefficient weight_dict["loss_mask"] = self.config.mask_loss_coefficient weight_dict["loss_dice"] = self.config.dice_loss_coefficient if self.config.auxiliary_loss: aux_weight_dict = {} for i in range(self.config.decoder_layers - 1): aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) weight_dict.update(aux_weight_dict) loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) if not return_dict: if auxiliary_outputs is not None: output = (logits, pred_boxes, pred_masks) + auxiliary_outputs + decoder_outputs + encoder_outputs else: output = (logits, pred_boxes, pred_masks) + decoder_outputs + encoder_outputs return ((loss, loss_dict) + output) if loss is not None else output return ConditionalDetrSegmentationOutput( loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, pred_masks=pred_masks, 
auxiliary_outputs=auxiliary_outputs, last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def _expand(tensor, length: int): return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1) # Copied from transformers.models.detr.modeling_detr.DetrMaskHeadSmallConv with Detr->ConditionalDetr class ConditionalDetrMaskHeadSmallConv(nn.Module): """ Simple convolutional head, using group norm. Upsampling is done using a FPN approach """ def __init__(self, dim, fpn_dims, context_dim): super().__init__() if dim % 8 != 0: raise ValueError( "The hidden_size + number of attention heads must be divisible by 8 as the number of groups in" " GroupNorm is set to 8" ) inter_dims = [dim, context_dim // 2, context_dim // 4, context_dim // 8, context_dim // 16, context_dim // 64] self.lay1 = nn.Conv2d(dim, dim, 3, padding=1) self.gn1 = nn.GroupNorm(8, dim) self.lay2 = nn.Conv2d(dim, inter_dims[1], 3, padding=1) self.gn2 = nn.GroupNorm(8, inter_dims[1]) self.lay3 = nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1) self.gn3 = nn.GroupNorm(8, inter_dims[2]) self.lay4 = nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1) self.gn4 = nn.GroupNorm(8, inter_dims[3]) self.lay5 = nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1) self.gn5 = nn.GroupNorm(8, inter_dims[4]) self.out_lay = nn.Conv2d(inter_dims[4], 1, 3, padding=1) self.dim = dim self.adapter1 = nn.Conv2d(fpn_dims[0], inter_dims[1], 1) self.adapter2 = nn.Conv2d(fpn_dims[1], inter_dims[2], 1) self.adapter3 = nn.Conv2d(fpn_dims[2], inter_dims[3], 1) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_uniform_(m.weight, a=1) nn.init.constant_(m.bias, 0) def forward(self, x: Tensor, bbox_mask: Tensor, fpns: List[Tensor]): # here we concatenate x, the projected feature map, of shape (batch_size, d_model, heigth/32, width/32) with # the bbox_mask = the attention maps of shape (batch_size, n_queries, n_heads, height/32, width/32). # We expand the projected feature map to match the number of heads. 
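        # Clarifying note: `_expand` repeats x once per query, so x becomes
        # (batch_size * num_queries, d_model, height/32, width/32) while the flattened bbox_mask is
        # (batch_size * num_queries, num_heads, height/32, width/32); the concatenation below therefore
        # has d_model + num_heads channels, matching `dim` in __init__.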
x = torch.cat([_expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1) x = self.lay1(x) x = self.gn1(x) x = nn.functional.relu(x) x = self.lay2(x) x = self.gn2(x) x = nn.functional.relu(x) cur_fpn = self.adapter1(fpns[0]) if cur_fpn.size(0) != x.size(0): cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") x = self.lay3(x) x = self.gn3(x) x = nn.functional.relu(x) cur_fpn = self.adapter2(fpns[1]) if cur_fpn.size(0) != x.size(0): cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") x = self.lay4(x) x = self.gn4(x) x = nn.functional.relu(x) cur_fpn = self.adapter3(fpns[2]) if cur_fpn.size(0) != x.size(0): cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") x = self.lay5(x) x = self.gn5(x) x = nn.functional.relu(x) x = self.out_lay(x) return x # Copied from transformers.models.detr.modeling_detr.DetrMHAttentionMap with Detr->ConditionalDetr class ConditionalDetrMHAttentionMap(nn.Module): """This is a 2D attention module, which only returns the attention softmax (no multiplication by value)""" def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True, std=None): super().__init__() self.num_heads = num_heads self.hidden_dim = hidden_dim self.dropout = nn.Dropout(dropout) self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias) self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias) self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5 def forward(self, q, k, mask: Optional[Tensor] = None): q = self.q_linear(q) k = nn.functional.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias) queries_per_head = q.view(q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads) keys_per_head = k.view(k.shape[0], self.num_heads, self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1]) weights = torch.einsum("bqnc,bnchw->bqnhw", queries_per_head * self.normalize_fact, keys_per_head) if mask is not None: weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), torch.finfo(weights.dtype).min) weights = nn.functional.softmax(weights.flatten(2), dim=-1).view(weights.size()) weights = self.dropout(weights) return weights # Copied from transformers.models.detr.modeling_detr.dice_loss def dice_loss(inputs, targets, num_boxes): """ Compute the DICE loss, similar to generalized IOU for masks Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). """ inputs = inputs.sigmoid() inputs = inputs.flatten(1) numerator = 2 * (inputs * targets).sum(1) denominator = inputs.sum(-1) + targets.sum(-1) loss = 1 - (numerator + 1) / (denominator + 1) return loss.sum() / num_boxes # Copied from transformers.models.detr.modeling_detr.sigmoid_focal_loss def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2): """ Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. Args: inputs (`torch.FloatTensor` of arbitrary shape): The predictions for each example. 
targets (`torch.FloatTensor` with the same shape as `inputs`) A tensor storing the binary classification label for each element in the `inputs` (0 for the negative class and 1 for the positive class). alpha (`float`, *optional*, defaults to `0.25`): Optional weighting factor in the range (0,1) to balance positive vs. negative examples. gamma (`int`, *optional*, defaults to `2`): Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. Returns: Loss tensor """ prob = inputs.sigmoid() ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction="none") # add modulating factor p_t = prob * targets + (1 - prob) * (1 - targets) loss = ce_loss * ((1 - p_t) ** gamma) if alpha >= 0: alpha_t = alpha * targets + (1 - alpha) * (1 - targets) loss = alpha_t * loss return loss.mean(1).sum() / num_boxes class ConditionalDetrLoss(nn.Module): """ This class computes the losses for ConditionalDetrForObjectDetection/ConditionalDetrForSegmentation. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box). Args: matcher (`ConditionalDetrHungarianMatcher`): Module able to compute a matching between targets and proposals. num_classes (`int`): Number of object categories, omitting the special no-object category. focal_alpha (`float`): Alpha parameter in focal loss. losses (`List[str]`): List of all the losses to be applied. See `get_loss` for a list of all available losses. """ # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.__init__ def __init__(self, matcher, num_classes, focal_alpha, losses): super().__init__() self.matcher = matcher self.num_classes = num_classes self.focal_alpha = focal_alpha self.losses = losses # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.loss_labels def loss_labels(self, outputs, targets, indices, num_boxes): """ Classification loss (Binary focal loss) targets dicts must contain the key "class_labels" containing a tensor of dim [nb_target_boxes] """ if "logits" not in outputs: raise KeyError("No logits were found in the outputs") source_logits = outputs["logits"] idx = self._get_source_permutation_idx(indices) target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full( source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device ) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros( [source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1], dtype=source_logits.dtype, layout=source_logits.layout, device=source_logits.device, ) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:, :, :-1] loss_ce = ( sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * source_logits.shape[1] ) losses = {"loss_ce": loss_ce} return losses @torch.no_grad() # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.loss_cardinality def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes. This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients. 
""" logits = outputs["logits"] device = logits.device target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1) card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float()) losses = {"cardinality_error": card_err} return losses # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.loss_boxes def loss_boxes(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss. Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. """ if "pred_boxes" not in outputs: raise KeyError("No predicted boxes found in outputs") idx = self._get_source_permutation_idx(indices) source_boxes = outputs["pred_boxes"][idx] target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction="none") losses = {} losses["loss_bbox"] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag( generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes)) ) losses["loss_giou"] = loss_giou.sum() / num_boxes return losses # Copied from transformers.models.detr.modeling_detr.DetrLoss.loss_masks def loss_masks(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the masks: the focal loss and the dice loss. Targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]. 
""" if "pred_masks" not in outputs: raise KeyError("No predicted masks found in outputs") source_idx = self._get_source_permutation_idx(indices) target_idx = self._get_target_permutation_idx(indices) source_masks = outputs["pred_masks"] source_masks = source_masks[source_idx] masks = [t["masks"] for t in targets] # TODO use valid to mask invalid areas due to padding in loss target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() target_masks = target_masks.to(source_masks) target_masks = target_masks[target_idx] # upsample predictions to the target size source_masks = nn.functional.interpolate( source_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False ) source_masks = source_masks[:, 0].flatten(1) target_masks = target_masks.flatten(1) target_masks = target_masks.view(source_masks.shape) losses = { "loss_mask": sigmoid_focal_loss(source_masks, target_masks, num_boxes), "loss_dice": dice_loss(source_masks, target_masks, num_boxes), } return losses # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss._get_source_permutation_idx def _get_source_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)]) source_idx = torch.cat([source for (source, _) in indices]) return batch_idx, source_idx # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss._get_target_permutation_idx def _get_target_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)]) target_idx = torch.cat([target for (_, target) in indices]) return batch_idx, target_idx # Copied from transformers.models.detr.modeling_detr.DetrLoss.get_loss def get_loss(self, loss, outputs, targets, indices, num_boxes): loss_map = { "labels": self.loss_labels, "cardinality": self.loss_cardinality, "boxes": self.loss_boxes, "masks": self.loss_masks, } if loss not in loss_map: raise ValueError(f"Loss {loss} not supported") return loss_map[loss](outputs, targets, indices, num_boxes) # Copied from transformers.models.detr.modeling_detr.DetrLoss.forward def forward(self, outputs, targets): """ This performs the loss computation. Args: outputs (`dict`, *optional*): Dictionary of tensors, see the output specification of the model for the format. targets (`List[dict]`, *optional*): List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the losses applied, see each loss' doc. 
""" outputs_without_aux = {k: v for k, v in outputs.items() if k != "auxiliary_outputs"} # Retrieve the matching between the outputs of the last layer and the targets indices = self.matcher(outputs_without_aux, targets) # Compute the average number of target boxes across all nodes, for normalization purposes num_boxes = sum(len(t["class_labels"]) for t in targets) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) # (Niels): comment out function below, distributed training to be added # if is_dist_avail_and_initialized(): # torch.distributed.all_reduce(num_boxes) # (Niels) in original implementation, num_boxes is divided by get_world_size() num_boxes = torch.clamp(num_boxes, min=1).item() # Compute all the requested losses losses = {} for loss in self.losses: losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. if "auxiliary_outputs" in outputs: for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]): indices = self.matcher(auxiliary_outputs, targets) for loss in self.losses: if loss == "masks": # Intermediate masks losses are too costly to compute, we ignore them. continue l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes) l_dict = {k + f"_{i}": v for k, v in l_dict.items()} losses.update(l_dict) return losses # Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead with Detr->ConditionalDetr class ConditionalDetrMLPPredictionHead(nn.Module): """ Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates, height and width of a bounding box w.r.t. an image. Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py """ def __init__(self, input_dim, hidden_dim, output_dim, num_layers): super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) def forward(self, x): for i, layer in enumerate(self.layers): x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrHungarianMatcher with DeformableDetr->ConditionalDetr class ConditionalDetrHungarianMatcher(nn.Module): """ This class computes an assignment between the targets and the predictions of the network. For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are un-matched (and thus treated as non-objects). Args: class_cost: The relative weight of the classification error in the matching cost. bbox_cost: The relative weight of the L1 error of the bounding box coordinates in the matching cost. giou_cost: The relative weight of the giou loss of the bounding box in the matching cost. 
""" def __init__(self, class_cost: float = 1, bbox_cost: float = 1, giou_cost: float = 1): super().__init__() requires_backends(self, ["scipy"]) self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost if class_cost == 0 and bbox_cost == 0 and giou_cost == 0: raise ValueError("All costs of the Matcher can't be 0") @torch.no_grad() def forward(self, outputs, targets): """ Args: outputs (`dict`): A dictionary that contains at least these entries: * "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits * "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates. targets (`List[dict]`): A list of targets (len(targets) = batch_size), where each target is a dict containing: * "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels * "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates. Returns: `List[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where: - index_i is the indices of the selected predictions (in order) - index_j is the indices of the corresponding selected targets (in order) For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes) """ batch_size, num_queries = outputs["logits"].shape[:2] # We flatten to compute the cost matrices in a batch out_prob = outputs["logits"].flatten(0, 1).sigmoid() # [batch_size * num_queries, num_classes] out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] # Also concat the target labels and boxes target_ids = torch.cat([v["class_labels"] for v in targets]) target_bbox = torch.cat([v["boxes"] for v in targets]) # Compute the classification cost. alpha = 0.25 gamma = 2.0 neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log()) pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log()) class_cost = pos_cost_class[:, target_ids] - neg_cost_class[:, target_ids] # Compute the L1 cost between boxes bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) # Compute the giou cost between boxes giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) # Final cost matrix cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu() sizes = [len(v["boxes"]) for v in targets] indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))] return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] # Copied from transformers.models.detr.modeling_detr._upcast def _upcast(t: Tensor) -> Tensor: # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type if t.is_floating_point(): return t if t.dtype in (torch.float32, torch.float64) else t.float() else: return t if t.dtype in (torch.int32, torch.int64) else t.int() # Copied from transformers.models.detr.modeling_detr.box_area def box_area(boxes: Tensor) -> Tensor: """ Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates. Args: boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`): Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1 < x2` and `0 <= y1 < y2`. 
Returns: `torch.FloatTensor`: a tensor containing the area for each box. """ boxes = _upcast(boxes) return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) # Copied from transformers.models.detr.modeling_detr.box_iou def box_iou(boxes1, boxes2): area1 = box_area(boxes1) area2 = box_area(boxes2) left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] width_height = (right_bottom - left_top).clamp(min=0) # [N,M,2] inter = width_height[:, :, 0] * width_height[:, :, 1] # [N,M] union = area1[:, None] + area2 - inter iou = inter / union return iou, union # Copied from transformers.models.detr.modeling_detr.generalized_box_iou def generalized_box_iou(boxes1, boxes2): """ Generalized IoU from https://giou.stanford.edu/. The boxes should be in [x0, y0, x1, y1] (corner) format. Returns: `torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2) """ # degenerate boxes gives inf / nan results # so do an early check if not (boxes1[:, 2:] >= boxes1[:, :2]).all(): raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}") if not (boxes2[:, 2:] >= boxes2[:, :2]).all(): raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}") iou, union = box_iou(boxes1, boxes2) top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2]) bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) width_height = (bottom_right - top_left).clamp(min=0) # [N,M,2] area = width_height[:, :, 0] * width_height[:, :, 1] return iou - (area - union) / area # Copied from transformers.models.detr.modeling_detr._max_by_axis def _max_by_axis(the_list): # type: (List[List[int]]) -> List[int] maxes = the_list[0] for sublist in the_list[1:]: for index, item in enumerate(sublist): maxes[index] = max(maxes[index], item) return maxes # Copied from transformers.models.detr.modeling_detr.NestedTensor class NestedTensor(object): def __init__(self, tensors, mask: Optional[Tensor]): self.tensors = tensors self.mask = mask def to(self, device): cast_tensor = self.tensors.to(device) mask = self.mask if mask is not None: cast_mask = mask.to(device) else: cast_mask = None return NestedTensor(cast_tensor, cast_mask) def decompose(self): return self.tensors, self.mask def __repr__(self): return str(self.tensors) # Copied from transformers.models.detr.modeling_detr.nested_tensor_from_tensor_list def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): if tensor_list[0].ndim == 3: max_size = _max_by_axis([list(img.shape) for img in tensor_list]) batch_shape = [len(tensor_list)] + max_size batch_size, num_channels, height, width = batch_shape dtype = tensor_list[0].dtype device = tensor_list[0].device tensor = torch.zeros(batch_shape, dtype=dtype, device=device) mask = torch.ones((batch_size, height, width), dtype=torch.bool, device=device) for img, pad_img, m in zip(tensor_list, tensor, mask): pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) m[: img.shape[1], : img.shape[2]] = False else: raise ValueError("Only 3-dimensional tensors are supported") return NestedTensor(tensor, mask)
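# Usage sketch for the box utilities above (illustrative values only, not part of the modeling code):
# `box_iou` and `generalized_box_iou` expect boxes in corner (x0, y0, x1, y1) format, which is why the
# matcher first converts model outputs from normalized (center_x, center_y, width, height) format with
# `center_to_corners_format`. For two partially overlapping boxes:
#
#     boxes1 = torch.tensor([[0.0, 0.0, 2.0, 2.0]])
#     boxes2 = torch.tensor([[1.0, 1.0, 3.0, 3.0]])
#     iou, union = box_iou(boxes1, boxes2)        # iou ~= 0.143 (intersection 1 / union 7)
#     giou = generalized_box_iou(boxes1, boxes2)  # ~= -0.079, shape [len(boxes1), len(boxes2)]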
# coding=utf-8 # Copyright 2022 Microsoft Research Asia and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Conditional DETR model.""" import math import random from dataclasses import dataclass from typing import Dict, List, Optional, Tuple import torch from torch import Tensor, nn from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithCrossAttentions, Seq2SeqModelOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import torch_int_div from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, is_scipy_available, is_timm_available, is_vision_available, logging, replace_return_docstrings, requires_backends, ) from ..auto import AutoBackbone from .configuration_conditional_detr import ConditionalDetrConfig if is_scipy_available(): from scipy.optimize import linear_sum_assignment if is_timm_available(): from timm import create_model if is_vision_available(): from transformers.image_transforms import center_to_corners_format logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "ConditionalDetrConfig" _CHECKPOINT_FOR_DOC = "microsoft/conditional-detr-resnet-50" CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = [ "microsoft/conditional-detr-resnet-50", # See all Conditional DETR models at https://huggingface.co/models?filter=conditional_detr ] @dataclass class ConditionalDetrDecoderOutput(BaseModelOutputWithCrossAttentions): """ Base class for outputs of the Conditional DETR decoder. This class adds one attribute to BaseModelOutputWithCrossAttentions, namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. This is useful when training the model with auxiliary decoding losses. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, num_queries, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`): Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. """ intermediate_hidden_states: Optional[torch.FloatTensor] = None reference_points: Optional[Tuple[torch.FloatTensor]] = None @dataclass class ConditionalDetrModelOutput(Seq2SeqModelOutput): """ Base class for outputs of the Conditional DETR encoder-decoder model. This class adds one attribute to Seq2SeqModelOutput, namely an optional stack of intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. This is useful when training the model with auxiliary decoding losses. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. 
encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. intermediate_hidden_states (`torch.FloatTensor` of shape `(config.decoder_layers, batch_size, sequence_length, hidden_size)`, *optional*, returned when `config.auxiliary_loss=True`): Intermediate decoder activations, i.e. the output of each decoder layer, each of them gone through a layernorm. """ intermediate_hidden_states: Optional[torch.FloatTensor] = None reference_points: Optional[Tuple[torch.FloatTensor]] = None @dataclass # Copied from transformers.models.detr.modeling_detr.DetrObjectDetectionOutput with Detr->ConditionalDetr class ConditionalDetrObjectDetectionOutput(ModelOutput): """ Output type of [`ConditionalDetrForObjectDetection`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided): Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss. loss_dict (`Dict`, *optional*): A dictionary containing the individual losses. Useful for logging. logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): Classification logits (including no-object) for all queries. pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). You can use [`~ConditionalDetrImageProcessor.post_process_object_detection`] to retrieve the unnormalized bounding boxes. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and `pred_boxes`) for each decoder layer. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the decoder of the model. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads.
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None loss_dict: Optional[Dict] = None logits: torch.FloatTensor = None pred_boxes: torch.FloatTensor = None auxiliary_outputs: Optional[List[Dict]] = None last_hidden_state: Optional[torch.FloatTensor] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass # Copied from transformers.models.detr.modeling_detr.DetrSegmentationOutput with Detr->ConditionalDetr class ConditionalDetrSegmentationOutput(ModelOutput): """ Output type of [`ConditionalDetrForSegmentation`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided): Total loss as a linear combination of a negative log-likelihood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss. loss_dict (`Dict`, *optional*): A dictionary containing the individual losses. Useful for logging. logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): Classification logits (including no-object) for all queries. pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). You can use [`~ConditionalDetrImageProcessor.post_process_object_detection`] to retrieve the unnormalized bounding boxes. pred_masks (`torch.FloatTensor` of shape `(batch_size, num_queries, height/4, width/4)`): Segmentation masks logits for all queries.
See also [`~ConditionalDetrImageProcessor.post_process_semantic_segmentation`] or [`~ConditionalDetrImageProcessor.post_process_instance_segmentation`] or [`~ConditionalDetrImageProcessor.post_process_panoptic_segmentation`] to evaluate semantic, instance and panoptic segmentation masks respectively. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and `pred_boxes`) for each decoder layer. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the decoder of the model. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads.
""" loss: Optional[torch.FloatTensor] = None loss_dict: Optional[Dict] = None logits: torch.FloatTensor = None pred_boxes: torch.FloatTensor = None pred_masks: torch.FloatTensor = None auxiliary_outputs: Optional[List[Dict]] = None last_hidden_state: Optional[torch.FloatTensor] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None # Copied from transformers.models.detr.modeling_detr.DetrFrozenBatchNorm2d with Detr->ConditionalDetr class ConditionalDetrFrozenBatchNorm2d(nn.Module): """ BatchNorm2d where the batch statistics and the affine parameters are fixed. Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than torchvision.models.resnet[18,34,50,101] produce nans. """ def __init__(self, n): super().__init__() self.register_buffer("weight", torch.ones(n)) self.register_buffer("bias", torch.zeros(n)) self.register_buffer("running_mean", torch.zeros(n)) self.register_buffer("running_var", torch.ones(n)) def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ): num_batches_tracked_key = prefix + "num_batches_tracked" if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] super()._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ) def forward(self, x): # move reshapes to the beginning # to make it user-friendly weight = self.weight.reshape(1, -1, 1, 1) bias = self.bias.reshape(1, -1, 1, 1) running_var = self.running_var.reshape(1, -1, 1, 1) running_mean = self.running_mean.reshape(1, -1, 1, 1) epsilon = 1e-5 scale = weight * (running_var + epsilon).rsqrt() bias = bias - running_mean * scale return x * scale + bias # Copied from transformers.models.detr.modeling_detr.replace_batch_norm with Detr->ConditionalDetr def replace_batch_norm(m, name=""): for attr_str in dir(m): target_attr = getattr(m, attr_str) if isinstance(target_attr, nn.BatchNorm2d): frozen = ConditionalDetrFrozenBatchNorm2d(target_attr.num_features) bn = getattr(m, attr_str) frozen.weight.data.copy_(bn.weight) frozen.bias.data.copy_(bn.bias) frozen.running_mean.data.copy_(bn.running_mean) frozen.running_var.data.copy_(bn.running_var) setattr(m, attr_str, frozen) for n, ch in m.named_children(): replace_batch_norm(ch, n) # Copied from transformers.models.detr.modeling_detr.DetrConvEncoder class ConditionalDetrConvEncoder(nn.Module): """ Convolutional backbone, using either the AutoBackbone API or one from the timm library. nn.BatchNorm2d layers are replaced by DetrFrozenBatchNorm2d as defined above. 
""" def __init__(self, config): super().__init__() self.config = config if config.use_timm_backbone: requires_backends(self, ["timm"]) kwargs = {} if config.dilation: kwargs["output_stride"] = 16 backbone = create_model( config.backbone, pretrained=config.use_pretrained_backbone, features_only=True, out_indices=(1, 2, 3, 4), in_chans=config.num_channels, **kwargs, ) else: backbone = AutoBackbone.from_config(config.backbone_config) # replace batch norm by frozen batch norm with torch.no_grad(): replace_batch_norm(backbone) self.model = backbone self.intermediate_channel_sizes = ( self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels ) backbone_model_type = config.backbone if config.use_timm_backbone else config.backbone_config.model_type if "resnet" in backbone_model_type: for name, parameter in self.model.named_parameters(): if config.use_timm_backbone: if "layer2" not in name and "layer3" not in name and "layer4" not in name: parameter.requires_grad_(False) else: if "stage.1" not in name and "stage.2" not in name and "stage.3" not in name: parameter.requires_grad_(False) def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): # send pixel_values through the model to get list of feature maps features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps out = [] for feature_map in features: # downsample pixel_mask to match shape of corresponding feature_map mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0] out.append((feature_map, mask)) return out # Copied from transformers.models.detr.modeling_detr.DetrConvModel with Detr->ConditionalDetr class ConditionalDetrConvModel(nn.Module): """ This module adds 2D position embeddings to all intermediate feature maps of the convolutional encoder. """ def __init__(self, conv_encoder, position_embedding): super().__init__() self.conv_encoder = conv_encoder self.position_embedding = position_embedding def forward(self, pixel_values, pixel_mask): # send pixel_values and pixel_mask through backbone to get list of (feature_map, pixel_mask) tuples out = self.conv_encoder(pixel_values, pixel_mask) pos = [] for feature_map, mask in out: # position encoding pos.append(self.position_embedding(feature_map, mask).to(feature_map.dtype)) return out, pos # Copied from transformers.models.detr.modeling_detr._expand_mask with Detr->ConditionalDetr def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, target_len: Optional[int] = None): """ Expands attention_mask from `[batch_size, seq_len]` to `[batch_size, 1, target_seq_len, source_seq_len]`. """ batch_size, source_len = mask.size() target_len = target_len if target_len is not None else source_len expanded_mask = mask[:, None, None, :].expand(batch_size, 1, target_len, source_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min) # Copied from transformers.models.detr.modeling_detr.DetrSinePositionEmbedding with Detr->ConditionalDetr class ConditionalDetrSinePositionEmbedding(nn.Module): """ This is a more standard version of the position embedding, very similar to the one used by the Attention is all you need paper, generalized to work on images. 
""" def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None): super().__init__() self.embedding_dim = embedding_dim self.temperature = temperature self.normalize = normalize if scale is not None and normalize is False: raise ValueError("normalize should be True if scale is passed") if scale is None: scale = 2 * math.pi self.scale = scale def forward(self, pixel_values, pixel_mask): if pixel_mask is None: raise ValueError("No pixel mask provided") y_embed = pixel_mask.cumsum(1, dtype=torch.float32) x_embed = pixel_mask.cumsum(2, dtype=torch.float32) if self.normalize: y_embed = y_embed / (y_embed[:, -1:, :] + 1e-6) * self.scale x_embed = x_embed / (x_embed[:, :, -1:] + 1e-6) * self.scale dim_t = torch.arange(self.embedding_dim, dtype=torch.float32, device=pixel_values.device) dim_t = self.temperature ** (2 * torch_int_div(dim_t, 2) / self.embedding_dim) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) return pos # Copied from transformers.models.detr.modeling_detr.DetrLearnedPositionEmbedding with Detr->ConditionalDetr class ConditionalDetrLearnedPositionEmbedding(nn.Module): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, embedding_dim=256): super().__init__() self.row_embeddings = nn.Embedding(50, embedding_dim) self.column_embeddings = nn.Embedding(50, embedding_dim) def forward(self, pixel_values, pixel_mask=None): height, width = pixel_values.shape[-2:] width_values = torch.arange(width, device=pixel_values.device) height_values = torch.arange(height, device=pixel_values.device) x_emb = self.column_embeddings(width_values) y_emb = self.row_embeddings(height_values) pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1) pos = pos.permute(2, 0, 1) pos = pos.unsqueeze(0) pos = pos.repeat(pixel_values.shape[0], 1, 1, 1) return pos # Copied from transformers.models.detr.modeling_detr.build_position_encoding with Detr->ConditionalDetr def build_position_encoding(config): n_steps = config.d_model // 2 if config.position_embedding_type == "sine": # TODO find a better way of exposing other arguments position_embedding = ConditionalDetrSinePositionEmbedding(n_steps, normalize=True) elif config.position_embedding_type == "learned": position_embedding = ConditionalDetrLearnedPositionEmbedding(n_steps) else: raise ValueError(f"Not supported {config.position_embedding_type}") return position_embedding # function to generate sine positional embedding for 2d coordinates def gen_sine_position_embeddings(pos_tensor): scale = 2 * math.pi dim_t = torch.arange(128, dtype=torch.float32, device=pos_tensor.device) dim_t = 10000 ** (2 * torch_int_div(dim_t, 2) / 128) x_embed = pos_tensor[:, :, 0] * scale y_embed = pos_tensor[:, :, 1] * scale pos_x = x_embed[:, :, None] / dim_t pos_y = y_embed[:, :, None] / dim_t pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=3).flatten(2) pos_y = torch.stack((pos_y[:, :, 0::2].sin(), pos_y[:, :, 1::2].cos()), dim=3).flatten(2) pos = torch.cat((pos_y, pos_x), dim=2) return pos def inverse_sigmoid(x, eps=1e-5): x = x.clamp(min=0, max=1) x1 = x.clamp(min=eps) x2 = (1 - x).clamp(min=eps) return torch.log(x1 / x2) # Copied from 
transformers.models.detr.modeling_detr.DetrAttention class DetrAttention(nn.Module): """ Multi-headed attention from 'Attention Is All You Need' paper. Here, we add position embeddings to the queries and keys (as explained in the DETR paper). """ def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {num_heads})." ) self.scaling = self.head_dim**-0.5 self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): return tensor if position_embeddings is None else tensor + position_embeddings def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_embeddings: Optional[torch.Tensor] = None, key_value_states: Optional[torch.Tensor] = None, key_value_position_embeddings: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None batch_size, target_len, embed_dim = hidden_states.size() # add position embeddings to the hidden states before projecting to queries and keys if position_embeddings is not None: hidden_states_original = hidden_states hidden_states = self.with_pos_embed(hidden_states, position_embeddings) # add key-value position embeddings to the key value states if key_value_position_embeddings is not None: key_value_states_original = key_value_states key_value_states = self.with_pos_embed(key_value_states, key_value_position_embeddings) # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, batch_size) value_states = self._shape(self.v_proj(key_value_states_original), -1, batch_size) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, batch_size) value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size) proj_shape = (batch_size * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) source_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len): raise ValueError( f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (batch_size, 1, target_len, source_len): raise ValueError( f"Attention mask should be of 
size {(batch_size, 1, target_len, source_len)}, but is" f" {attention_mask.size()}" ) attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(batch_size, target_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped class ConditionalDetrAttention(nn.Module): """ Cross-Attention used in Conditional DETR 'Conditional DETR for Fast Training Convergence' paper. The key q_proj, k_proj, v_proj are defined outside the attention. This attention allows the dim of q, k to be different to v. """ def __init__( self, embed_dim: int, out_dim: int, num_heads: int, dropout: float = 0.0, bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.out_dim = out_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {num_heads})." ) # head dimension of values self.v_head_dim = out_dim // num_heads if self.v_head_dim * num_heads != self.out_dim: raise ValueError( f"out_dim must be divisible by num_heads (got `out_dim`: {self.out_dim} and `num_heads`: {num_heads})."
) self.scaling = self.head_dim**-0.5 self.out_proj = nn.Linear(out_dim, out_dim, bias=bias) def _qk_shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def _v_shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.v_head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, key_states: Optional[torch.Tensor] = None, value_states: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" batch_size, target_len, _ = hidden_states.size() # get query proj query_states = hidden_states * self.scaling # get key, value proj key_states = self._qk_shape(key_states, -1, batch_size) value_states = self._v_shape(value_states, -1, batch_size) proj_shape = (batch_size * self.num_heads, -1, self.head_dim) v_proj_shape = (batch_size * self.num_heads, -1, self.v_head_dim) query_states = self._qk_shape(query_states, target_len, batch_size).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*v_proj_shape) source_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len): raise ValueError( f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is" f" {attn_weights.size()}" ) if attention_mask is not None: if attention_mask.size() != (batch_size, 1, target_len, source_len): raise ValueError( f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is" f" {attention_mask.size()}" ) attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. 
# In order to do so, attn_weights have to be reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (batch_size * self.num_heads, target_len, self.v_head_dim): raise ValueError( f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.v_head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.v_head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(batch_size, target_len, self.out_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped # Copied from transformers.models.detr.modeling_detr.DetrEncoderLayer with DetrEncoderLayer->ConditionalDetrEncoderLayer,DetrConfig->ConditionalDetrConfig class ConditionalDetrEncoderLayer(nn.Module): def __init__(self, config: ConditionalDetrConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = DetrAttention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: torch.Tensor = None, output_attentions: bool = False, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. position_embeddings (`torch.FloatTensor`, *optional*): position embeddings, to be added to hidden_states. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail.
""" residual = hidden_states hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, position_embeddings=position_embeddings, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if self.training: if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class ConditionalDetrDecoderLayer(nn.Module): def __init__(self, config: ConditionalDetrConfig): super().__init__() self.embed_dim = config.d_model d_model = config.d_model # Decoder Self-Attention projections self.sa_qcontent_proj = nn.Linear(d_model, d_model) self.sa_qpos_proj = nn.Linear(d_model, d_model) self.sa_kcontent_proj = nn.Linear(d_model, d_model) self.sa_kpos_proj = nn.Linear(d_model, d_model) self.sa_v_proj = nn.Linear(d_model, d_model) self.self_attn = ConditionalDetrAttention( embed_dim=self.embed_dim, out_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) # Decoder Cross-Attention projections self.ca_qcontent_proj = nn.Linear(d_model, d_model) self.ca_qpos_proj = nn.Linear(d_model, d_model) self.ca_kcontent_proj = nn.Linear(d_model, d_model) self.ca_kpos_proj = nn.Linear(d_model, d_model) self.ca_v_proj = nn.Linear(d_model, d_model) self.ca_qpos_sine_proj = nn.Linear(d_model, d_model) self.encoder_attn = ConditionalDetrAttention( self.embed_dim * 2, self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) self.nhead = config.decoder_attention_heads def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_embeddings: Optional[torch.Tensor] = None, query_position_embeddings: Optional[torch.Tensor] = None, query_sine_embed: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, is_first: Optional[bool] = False, ): """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (`torch.FloatTensor`): attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. 
position_embeddings (`torch.FloatTensor`, *optional*): position embeddings that are added to the queries and keys in the cross-attention layer. query_position_embeddings (`torch.FloatTensor`, *optional*): position embeddings that are added to the queries and keys in the self-attention layer. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states # ========== Begin of Self-Attention ============= # Apply projections here # shape: num_queries x batch_size x 256 q_content = self.sa_qcontent_proj( hidden_states ) # target is the input of the first decoder layer. zero by default. q_pos = self.sa_qpos_proj(query_position_embeddings) k_content = self.sa_kcontent_proj(hidden_states) k_pos = self.sa_kpos_proj(query_position_embeddings) v = self.sa_v_proj(hidden_states) _, num_queries, n_model = q_content.shape q = q_content + q_pos k = k_content + k_pos hidden_states, self_attn_weights = self.self_attn( hidden_states=q, attention_mask=attention_mask, key_states=k, value_states=v, output_attentions=output_attentions, ) # ============ End of Self-Attention ============= hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # ========== Begin of Cross-Attention ============= # Apply projections here # shape: num_queries x batch_size x 256 q_content = self.ca_qcontent_proj(hidden_states) k_content = self.ca_kcontent_proj(encoder_hidden_states) v = self.ca_v_proj(encoder_hidden_states) batch_size, num_queries, n_model = q_content.shape _, source_len, _ = k_content.shape k_pos = self.ca_kpos_proj(position_embeddings) # For the first decoder layer, we concatenate the positional embedding predicted from # the object query (the positional embedding) into the original query (key) in DETR. 
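# Shape bookkeeping for the conditional cross-attention built below: the content query/key and the
# (sine) positional query/key are each split into `self.nhead` heads and concatenated per head, so the
# query/key dimension passed to `self.encoder_attn` is `n_model * 2` (matching the
# `ConditionalDetrAttention(self.embed_dim * 2, self.embed_dim, ...)` definition in `__init__`),
# while the values keep dimension `n_model`.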
if is_first: q_pos = self.ca_qpos_proj(query_position_embeddings) q = q_content + q_pos k = k_content + k_pos else: q = q_content k = k_content q = q.view(batch_size, num_queries, self.nhead, n_model // self.nhead) query_sine_embed = self.ca_qpos_sine_proj(query_sine_embed) query_sine_embed = query_sine_embed.view(batch_size, num_queries, self.nhead, n_model // self.nhead) q = torch.cat([q, query_sine_embed], dim=3).view(batch_size, num_queries, n_model * 2) k = k.view(batch_size, source_len, self.nhead, n_model // self.nhead) k_pos = k_pos.view(batch_size, source_len, self.nhead, n_model // self.nhead) k = torch.cat([k, k_pos], dim=3).view(batch_size, source_len, n_model * 2) # Cross-Attention Block cross_attn_weights = None if encoder_hidden_states is not None: residual = hidden_states hidden_states, cross_attn_weights = self.encoder_attn( hidden_states=q, attention_mask=encoder_attention_mask, key_states=k, value_states=v, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # ============ End of Cross-Attention ============= # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs # Copied from transformers.models.detr.modeling_detr.DetrClassificationHead with Detr->ConditionalDetr class ConditionalDetrClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, hidden_states: torch.Tensor): hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states # Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead with DetrMLPPredictionHead->MLP class MLP(nn.Module): """ Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates, height and width of a bounding box w.r.t. an image. 
Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py """ def __init__(self, input_dim, hidden_dim, output_dim, num_layers): super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) def forward(self, x): for i, layer in enumerate(self.layers): x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x # Copied from transformers.models.detr.modeling_detr.DetrPreTrainedModel with Detr->ConditionalDetr class ConditionalDetrPreTrainedModel(PreTrainedModel): config_class = ConditionalDetrConfig base_model_prefix = "model" main_input_name = "pixel_values" def _init_weights(self, module): std = self.config.init_std xavier_std = self.config.init_xavier_std if isinstance(module, ConditionalDetrMHAttentionMap): nn.init.zeros_(module.k_linear.bias) nn.init.zeros_(module.q_linear.bias) nn.init.xavier_uniform_(module.k_linear.weight, gain=xavier_std) nn.init.xavier_uniform_(module.q_linear.weight, gain=xavier_std) elif isinstance(module, ConditionalDetrLearnedPositionEmbedding): nn.init.uniform_(module.row_embeddings.weight) nn.init.uniform_(module.column_embeddings.weight) if isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, ConditionalDetrDecoder): module.gradient_checkpointing = value CONDITIONAL_DETR_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and behavior. Parameters: config ([`ConditionalDetrConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ CONDITIONAL_DETR_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ConditionalDetrImageProcessor.__call__`] for details. pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, num_queries)`, *optional*): Not used by default. Can be used to mask object queries.
encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you can choose to directly pass a flattened representation of an image. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an embedded representation. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ # Copied from transformers.models.detr.modeling_detr.DetrEncoder with Detr->ConditionalDetr,DETR->ConditionalDETR class ConditionalDetrEncoder(ConditionalDetrPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a [`ConditionalDetrEncoderLayer`]. The encoder updates the flattened feature map through multiple self-attention layers. Small tweak for ConditionalDETR: - position_embeddings are added to the forward pass. Args: config: ConditionalDetrConfig """ def __init__(self, config: ConditionalDetrConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop self.layers = nn.ModuleList([ConditionalDetrEncoderLayer(config) for _ in range(config.encoder_layers)]) # in the original ConditionalDETR, no layernorm is used at the end of the encoder, as "normalize_before" is set to False by default # Initialize weights and apply final processing self.post_init() def forward( self, inputs_embeds=None, attention_mask=None, position_embeddings=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Flattened feature map (output of the backbone + projection layer) that is passed to the encoder. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`: - 1 for pixel features that are real (i.e. **not masked**), - 0 for pixel features that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Position embeddings that are added to the queries and keys in each self-attention layer. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. 
return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = inputs_embeds hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) # expand attention_mask if attention_mask is not None: # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): # skip the layer layer_outputs = (None, None) else: # we add position_embeddings as extra input to the encoder_layer layer_outputs = encoder_layer( hidden_states, attention_mask, position_embeddings=position_embeddings, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class ConditionalDetrDecoder(ConditionalDetrPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`ConditionalDetrDecoderLayer`]. The decoder updates the query embeddings through multiple self-attention and cross-attention layers. Some small tweaks for Conditional DETR: - position_embeddings and query_position_embeddings are added to the forward pass. - if self.config.auxiliary_loss is set to True, also returns a stack of activations from all decoding layers. 
Args: config: ConditionalDetrConfig """ def __init__(self, config: ConditionalDetrConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.layers = nn.ModuleList([ConditionalDetrDecoderLayer(config) for _ in range(config.decoder_layers)]) # in Conditional DETR, the decoder uses layernorm after the last decoder layer output self.layernorm = nn.LayerNorm(config.d_model) d_model = config.d_model self.gradient_checkpointing = False # query_scale is the FFN applied on f to generate transformation T self.query_scale = MLP(d_model, d_model, d_model, 2) self.ref_point_head = MLP(d_model, d_model, 2, 2) for layer_id in range(config.decoder_layers - 1): self.layers[layer_id + 1].ca_qpos_proj = None # Initialize weights and apply final processing self.post_init() def forward( self, inputs_embeds=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings=None, query_position_embeddings=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): The query embeddings that are passed into the decoder. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on certain queries. Mask values selected in `[0, 1]`: - 1 for queries that are **not masked**, - 0 for queries that are **masked**. [What are attention masks?](../glossary#attention-mask) encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, encoder_sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, encoder_sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Position embeddings that are added to the queries and keys in each cross-attention layer. query_position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`): , *optional*): Position embeddings that are added to the queries and keys in each self-attention layer. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is not None: hidden_states = inputs_embeds input_shape = inputs_embeds.size()[:-1] combined_attention_mask = None if attention_mask is not None and combined_attention_mask is not None: # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] combined_attention_mask = combined_attention_mask + _expand_mask( attention_mask, inputs_embeds.dtype, target_len=input_shape[-1] ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] encoder_attention_mask = _expand_mask( encoder_attention_mask, inputs_embeds.dtype, target_len=input_shape[-1] ) # optional intermediate hidden states intermediate = () if self.config.auxiliary_loss else None # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None reference_points_before_sigmoid = self.ref_point_head( query_position_embeddings ) # [num_queries, batch_size, 2] reference_points = reference_points_before_sigmoid.sigmoid().transpose(0, 1) obj_center = reference_points[..., :2].transpose(0, 1) # get sine embedding for the query vector query_sine_embed_before_transformation = gen_sine_position_embeddings(obj_center) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): continue if idx == 0: pos_transformation = 1 else: pos_transformation = self.query_scale(hidden_states) # apply transformation query_sine_embed = query_sine_embed_before_transformation * pos_transformation if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(decoder_layer), hidden_states, combined_attention_mask, position_embeddings, query_position_embeddings, query_sine_embed, encoder_hidden_states, encoder_attention_mask, None, None, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=combined_attention_mask, position_embeddings=position_embeddings, query_position_embeddings=query_position_embeddings, query_sine_embed=query_sine_embed, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, is_first=(idx == 0), ) hidden_states = layer_outputs[0] if self.config.auxiliary_loss: hidden_states = self.layernorm(hidden_states) intermediate += (hidden_states,) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # finally, apply layernorm hidden_states = self.layernorm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) # stack intermediate decoder activations if self.config.auxiliary_loss: 
intermediate = torch.stack(intermediate) if not return_dict: return tuple( v for v in [ hidden_states, all_hidden_states, all_self_attns, all_cross_attentions, intermediate, reference_points, ] if v is not None ) return ConditionalDetrDecoderOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, intermediate_hidden_states=intermediate, reference_points=reference_points, ) @add_start_docstrings( """ The bare Conditional DETR Model (consisting of a backbone and encoder-decoder Transformer) outputting raw hidden-states without any specific head on top. """, CONDITIONAL_DETR_START_DOCSTRING, ) class ConditionalDetrModel(ConditionalDetrPreTrainedModel): def __init__(self, config: ConditionalDetrConfig): super().__init__(config) # Create backbone + positional encoding backbone = ConditionalDetrConvEncoder(config) position_embeddings = build_position_encoding(config) self.backbone = ConditionalDetrConvModel(backbone, position_embeddings) # Create projection layer self.input_projection = nn.Conv2d(backbone.intermediate_channel_sizes[-1], config.d_model, kernel_size=1) self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model) self.encoder = ConditionalDetrEncoder(config) self.decoder = ConditionalDetrDecoder(config) # Initialize weights and apply final processing self.post_init() def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def freeze_backbone(self): for name, param in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(False) def unfreeze_backbone(self): for name, param in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(True) @add_start_docstrings_to_model_forward(CONDITIONAL_DETR_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ConditionalDetrModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values, pixel_mask=None, decoder_attention_mask=None, encoder_outputs=None, inputs_embeds=None, decoder_inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Returns: Examples: ```python >>> from transformers import AutoImageProcessor, AutoModel >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50") >>> model = AutoModel.from_pretrained("microsoft/conditional-detr-resnet-50") >>> # prepare image for the model >>> inputs = image_processor(images=image, return_tensors="pt") >>> # forward pass >>> outputs = model(**inputs) >>> # the last hidden states are the final query embeddings of the Transformer decoder >>> # these are of shape (batch_size, num_queries, hidden_size) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 300, 256] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, num_channels, height, width = pixel_values.shape device = pixel_values.device if pixel_mask is None: pixel_mask = torch.ones(((batch_size, height, width)), device=device) # First, sent pixel_values + pixel_mask through Backbone to obtain the 
features # pixel_values should be of shape (batch_size, num_channels, height, width) # pixel_mask should be of shape (batch_size, height, width) features, position_embeddings_list = self.backbone(pixel_values, pixel_mask) # get final feature map and downsampled mask feature_map, mask = features[-1] if mask is None: raise ValueError("Backbone does not return downsampled pixel mask") # Second, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default) projected_feature_map = self.input_projection(feature_map) # Third, flatten the feature map + position embeddings of shape NxCxHxW to NxCxHW, and permute it to NxHWxC # In other words, turn their shape into (batch_size, sequence_length, hidden_size) flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1) position_embeddings = position_embeddings_list[-1].flatten(2).permute(0, 2, 1) flattened_mask = mask.flatten(1) # Fourth, sent flattened_features + flattened_mask + position embeddings through encoder # flattened_features is a Tensor of shape (batch_size, heigth*width, hidden_size) # flattened_mask is a Tensor of shape (batch_size, heigth*width) if encoder_outputs is None: encoder_outputs = self.encoder( inputs_embeds=flattened_features, attention_mask=flattened_mask, position_embeddings=position_embeddings, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # Fifth, sent query embeddings + position embeddings through the decoder (which is conditioned on the encoder output) query_position_embeddings = self.query_position_embeddings.weight.unsqueeze(0).repeat(batch_size, 1, 1) queries = torch.zeros_like(query_position_embeddings) # decoder outputs consists of (dec_features, dec_hidden, dec_attn) decoder_outputs = self.decoder( inputs_embeds=queries, attention_mask=None, position_embeddings=position_embeddings, query_position_embeddings=query_position_embeddings, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=flattened_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return ConditionalDetrModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, intermediate_hidden_states=decoder_outputs.intermediate_hidden_states, reference_points=decoder_outputs.reference_points, ) @add_start_docstrings( """ CONDITIONAL_DETR Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on top, for tasks such as COCO detection. 
""", CONDITIONAL_DETR_START_DOCSTRING, ) class ConditionalDetrForObjectDetection(ConditionalDetrPreTrainedModel): def __init__(self, config: ConditionalDetrConfig): super().__init__(config) # CONDITIONAL DETR encoder-decoder model self.model = ConditionalDetrModel(config) # Object detection heads self.class_labels_classifier = nn.Linear( config.d_model, config.num_labels ) # We add one for the "no object" class self.bbox_predictor = ConditionalDetrMLPPredictionHead( input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3 ) # Initialize weights and apply final processing self.post_init() # taken from https://github.com/Atten4Vis/conditionalDETR/blob/master/models/conditional_detr.py @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list. return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] @add_start_docstrings_to_model_forward(CONDITIONAL_DETR_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ConditionalDetrObjectDetectionOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values, pixel_mask=None, decoder_attention_mask=None, encoder_outputs=None, inputs_embeds=None, decoder_inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (`List[Dict]` of len `(batch_size,)`, *optional*): Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`. Returns: Examples: ```python >>> from transformers import AutoImageProcessor, AutoModelForObjectDetection >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50") >>> model = AutoModelForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50") >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> # convert outputs (bounding boxes and class logits) to COCO API >>> target_sizes = torch.tensor([image.size[::-1]]) >>> results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[ ... 0 ... ] >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... box = [round(i, 2) for i in box.tolist()] ... print( ... f"Detected {model.config.id2label[label.item()]} with confidence " ... f"{round(score.item(), 3)} at location {box}" ... 
) Detected remote with confidence 0.833 at location [38.31, 72.1, 177.63, 118.45] Detected cat with confidence 0.831 at location [9.2, 51.38, 321.13, 469.0] Detected cat with confidence 0.804 at location [340.3, 16.85, 642.93, 370.95] Detected remote with confidence 0.683 at location [334.48, 73.49, 366.37, 190.01] Detected couch with confidence 0.535 at location [0.52, 1.19, 640.35, 475.1] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict # First, sent images through CONDITIONAL_DETR base model to obtain encoder + decoder outputs outputs = self.model( pixel_values, pixel_mask=pixel_mask, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] # class logits + predicted bounding boxes logits = self.class_labels_classifier(sequence_output) reference = outputs.reference_points if return_dict else outputs[-1] reference_before_sigmoid = inverse_sigmoid(reference).transpose(0, 1) outputs_coords = [] hs = sequence_output tmp = self.bbox_predictor(hs) tmp[..., :2] += reference_before_sigmoid pred_boxes = tmp.sigmoid() # pred_boxes = self.bbox_predictor(sequence_output).sigmoid() loss, loss_dict, auxiliary_outputs = None, None, None if labels is not None: # First: create the matcher matcher = ConditionalDetrHungarianMatcher( class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost ) # Second: create the criterion losses = ["labels", "boxes", "cardinality"] criterion = ConditionalDetrLoss( matcher=matcher, num_classes=self.config.num_labels, focal_alpha=self.config.focal_alpha, losses=losses, ) criterion.to(self.device) # Third: compute the losses, based on outputs and labels outputs_loss = {} outputs_loss["logits"] = logits outputs_loss["pred_boxes"] = pred_boxes if self.config.auxiliary_loss: intermediate = outputs.intermediate_hidden_states if return_dict else outputs[4] outputs_class = self.class_labels_classifier(intermediate) for lvl in range(hs.shape[0]): tmp = self.bbox_predictor(hs[lvl]) tmp[..., :2] += reference_before_sigmoid outputs_coord = tmp.sigmoid() outputs_coords.append(outputs_coord) outputs_coord = torch.stack(outputs_coords) auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord) outputs_loss["auxiliary_outputs"] = auxiliary_outputs loss_dict = criterion(outputs_loss, labels) # Fourth: compute total loss, as a weighted sum of the various losses weight_dict = {"loss_ce": self.config.cls_loss_coefficient, "loss_bbox": self.config.bbox_loss_coefficient} weight_dict["loss_giou"] = self.config.giou_loss_coefficient if self.config.auxiliary_loss: aux_weight_dict = {} for i in range(self.config.decoder_layers - 1): aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) weight_dict.update(aux_weight_dict) loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) if not return_dict: if auxiliary_outputs is not None: output = (logits, pred_boxes) + auxiliary_outputs + outputs else: output = (logits, pred_boxes) + outputs return ((loss, loss_dict) + output) if loss is not None else output return ConditionalDetrObjectDetectionOutput( loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, auxiliary_outputs=auxiliary_outputs, last_hidden_state=outputs.last_hidden_state, 
decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, ) @add_start_docstrings( """ CONDITIONAL_DETR Model (consisting of a backbone and encoder-decoder Transformer) with a segmentation head on top, for tasks such as COCO panoptic. """, CONDITIONAL_DETR_START_DOCSTRING, ) class ConditionalDetrForSegmentation(ConditionalDetrPreTrainedModel): def __init__(self, config: ConditionalDetrConfig): super().__init__(config) # object detection model self.conditional_detr = ConditionalDetrForObjectDetection(config) # segmentation head hidden_size, number_of_heads = config.d_model, config.encoder_attention_heads intermediate_channel_sizes = self.conditional_detr.model.backbone.conv_encoder.intermediate_channel_sizes self.mask_head = ConditionalDetrMaskHeadSmallConv( hidden_size + number_of_heads, intermediate_channel_sizes[::-1][-3:], hidden_size ) self.bbox_attention = ConditionalDetrMHAttentionMap( hidden_size, hidden_size, number_of_heads, dropout=0.0, std=config.init_xavier_std ) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(CONDITIONAL_DETR_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ConditionalDetrSegmentationOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values, pixel_mask=None, decoder_attention_mask=None, encoder_outputs=None, inputs_embeds=None, decoder_inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (`List[Dict]` of len `(batch_size,)`, *optional*): Labels for computing the bipartite matching loss, DICE/F-1 loss and Focal loss. List of dicts, each dictionary containing at least the following 3 keys: 'class_labels', 'boxes' and 'masks' (the class labels, bounding boxes and segmentation masks of an image in the batch respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes in the image,)`, the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)` and the masks a `torch.FloatTensor` of shape `(number of bounding boxes in the image, height, width)`. Returns: Examples: ```python >>> import io >>> import requests >>> from PIL import Image >>> import torch >>> import numpy >>> from transformers import ( ... AutoImageProcessor, ... ConditionalDetrConfig, ... ConditionalDetrForSegmentation, ... 
) >>> from transformers.image_transforms import rgb_to_id >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50") >>> # randomly initialize all weights of the model >>> config = ConditionalDetrConfig() >>> model = ConditionalDetrForSegmentation(config) >>> # prepare image for the model >>> inputs = image_processor(images=image, return_tensors="pt") >>> # forward pass >>> outputs = model(**inputs) >>> # Use the `post_process_panoptic_segmentation` method of the `image_processor` to retrieve post-processed panoptic segmentation maps >>> # Segmentation results are returned as a list of dictionaries >>> result = image_processor.post_process_panoptic_segmentation(outputs, target_sizes=[(300, 500)]) >>> # A tensor of shape (height, width) where each value denotes a segment id, filled with -1 if no segment is found >>> panoptic_seg = result[0]["segmentation"] >>> # Get prediction score and segment_id to class_id mapping of each segment >>> panoptic_segments_info = result[0]["segments_info"] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, num_channels, height, width = pixel_values.shape device = pixel_values.device if pixel_mask is None: pixel_mask = torch.ones((batch_size, height, width), device=device) # First, get list of feature maps and position embeddings features, position_embeddings_list = self.conditional_detr.model.backbone(pixel_values, pixel_mask=pixel_mask) # Second, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default) feature_map, mask = features[-1] batch_size, num_channels, height, width = feature_map.shape projected_feature_map = self.conditional_detr.model.input_projection(feature_map) # Third, flatten the feature map + position embeddings of shape NxCxHxW to NxCxHW, and permute it to NxHWxC # In other words, turn their shape into (batch_size, sequence_length, hidden_size) flattened_features = projected_feature_map.flatten(2).permute(0, 2, 1) position_embeddings = position_embeddings_list[-1].flatten(2).permute(0, 2, 1) flattened_mask = mask.flatten(1) # Fourth, sent flattened_features + flattened_mask + position embeddings through encoder # flattened_features is a Tensor of shape (batch_size, heigth*width, hidden_size) # flattened_mask is a Tensor of shape (batch_size, heigth*width) if encoder_outputs is None: encoder_outputs = self.conditional_detr.model.encoder( inputs_embeds=flattened_features, attention_mask=flattened_mask, position_embeddings=position_embeddings, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # Fifth, sent query embeddings + position embeddings through the decoder (which is conditioned on the encoder output) query_position_embeddings = self.conditional_detr.model.query_position_embeddings.weight.unsqueeze(0).repeat( batch_size, 1, 1 ) queries = torch.zeros_like(query_position_embeddings) # decoder outputs consists of (dec_features, dec_hidden, dec_attn) 
decoder_outputs = self.conditional_detr.model.decoder( inputs_embeds=queries, attention_mask=None, position_embeddings=position_embeddings, query_position_embeddings=query_position_embeddings, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=flattened_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = decoder_outputs[0] # Sixth, compute logits, pred_boxes and pred_masks logits = self.conditional_detr.class_labels_classifier(sequence_output) pred_boxes = self.conditional_detr.bbox_predictor(sequence_output).sigmoid() memory = encoder_outputs[0].permute(0, 2, 1).view(batch_size, self.config.d_model, height, width) mask = flattened_mask.view(batch_size, height, width) # FIXME h_boxes takes the last one computed, keep this in mind # important: we need to reverse the mask, since in the original implementation the mask works reversed # bbox_mask is of shape (batch_size, num_queries, number_of_attention_heads in bbox_attention, height/32, width/32) bbox_mask = self.bbox_attention(sequence_output, memory, mask=~mask) seg_masks = self.mask_head(projected_feature_map, bbox_mask, [features[2][0], features[1][0], features[0][0]]) pred_masks = seg_masks.view( batch_size, self.conditional_detr.config.num_queries, seg_masks.shape[-2], seg_masks.shape[-1] ) loss, loss_dict, auxiliary_outputs = None, None, None if labels is not None: # First: create the matcher matcher = ConditionalDetrHungarianMatcher( class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost ) # Second: create the criterion losses = ["labels", "boxes", "cardinality", "masks"] criterion = ConditionalDetrLoss( matcher=matcher, num_classes=self.config.num_labels, focal_alpha=self.config.focal_alpha, losses=losses, ) criterion.to(self.device) # Third: compute the losses, based on outputs and labels outputs_loss = {} outputs_loss["logits"] = logits outputs_loss["pred_boxes"] = pred_boxes outputs_loss["pred_masks"] = pred_masks if self.config.auxiliary_loss: intermediate = decoder_outputs.intermediate_hidden_states if return_dict else decoder_outputs[-1] outputs_class = self.class_labels_classifier(intermediate) outputs_coord = self.bbox_predictor(intermediate).sigmoid() auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord) outputs_loss["auxiliary_outputs"] = auxiliary_outputs loss_dict = criterion(outputs_loss, labels) # Fourth: compute total loss, as a weighted sum of the various losses weight_dict = {"loss_ce": 1, "loss_bbox": self.config.bbox_loss_coefficient} weight_dict["loss_giou"] = self.config.giou_loss_coefficient weight_dict["loss_mask"] = self.config.mask_loss_coefficient weight_dict["loss_dice"] = self.config.dice_loss_coefficient if self.config.auxiliary_loss: aux_weight_dict = {} for i in range(self.config.decoder_layers - 1): aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) weight_dict.update(aux_weight_dict) loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) if not return_dict: if auxiliary_outputs is not None: output = (logits, pred_boxes, pred_masks) + auxiliary_outputs + decoder_outputs + encoder_outputs else: output = (logits, pred_boxes, pred_masks) + decoder_outputs + encoder_outputs return ((loss, loss_dict) + output) if loss is not None else output return ConditionalDetrSegmentationOutput( loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, pred_masks=pred_masks, 
auxiliary_outputs=auxiliary_outputs, last_hidden_state=decoder_outputs.last_hidden_state, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) def _expand(tensor, length: int): return tensor.unsqueeze(1).repeat(1, int(length), 1, 1, 1).flatten(0, 1) # Copied from transformers.models.detr.modeling_detr.DetrMaskHeadSmallConv with Detr->ConditionalDetr class ConditionalDetrMaskHeadSmallConv(nn.Module): """ Simple convolutional head, using group norm. Upsampling is done using a FPN approach """ def __init__(self, dim, fpn_dims, context_dim): super().__init__() if dim % 8 != 0: raise ValueError( "The hidden_size + number of attention heads must be divisible by 8 as the number of groups in" " GroupNorm is set to 8" ) inter_dims = [dim, context_dim // 2, context_dim // 4, context_dim // 8, context_dim // 16, context_dim // 64] self.lay1 = nn.Conv2d(dim, dim, 3, padding=1) self.gn1 = nn.GroupNorm(8, dim) self.lay2 = nn.Conv2d(dim, inter_dims[1], 3, padding=1) self.gn2 = nn.GroupNorm(8, inter_dims[1]) self.lay3 = nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1) self.gn3 = nn.GroupNorm(8, inter_dims[2]) self.lay4 = nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1) self.gn4 = nn.GroupNorm(8, inter_dims[3]) self.lay5 = nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1) self.gn5 = nn.GroupNorm(8, inter_dims[4]) self.out_lay = nn.Conv2d(inter_dims[4], 1, 3, padding=1) self.dim = dim self.adapter1 = nn.Conv2d(fpn_dims[0], inter_dims[1], 1) self.adapter2 = nn.Conv2d(fpn_dims[1], inter_dims[2], 1) self.adapter3 = nn.Conv2d(fpn_dims[2], inter_dims[3], 1) for m in self.modules(): if isinstance(m, nn.Conv2d): nn.init.kaiming_uniform_(m.weight, a=1) nn.init.constant_(m.bias, 0) def forward(self, x: Tensor, bbox_mask: Tensor, fpns: List[Tensor]): # here we concatenate x, the projected feature map, of shape (batch_size, d_model, heigth/32, width/32) with # the bbox_mask = the attention maps of shape (batch_size, n_queries, n_heads, height/32, width/32). # We expand the projected feature map to match the number of heads. 
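        # A rough shape sketch (illustrative values, not from the original comments; assumes d_model=256 and 8 attention heads):
        #   x:         (batch_size, 256, height/32, width/32) -> _expand repeats it per query and flattens to
        #              (batch_size * num_queries, 256, height/32, width/32)
        #   bbox_mask: (batch_size, num_queries, 8, height/32, width/32) -> flattened to
        #              (batch_size * num_queries, 8, height/32, width/32)
        # Concatenating along the channel dimension gives (batch_size * num_queries, 256 + 8, height/32, width/32),
        # which matches the `hidden_size + number_of_heads` input dimension this mask head was built with.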
x = torch.cat([_expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1) x = self.lay1(x) x = self.gn1(x) x = nn.functional.relu(x) x = self.lay2(x) x = self.gn2(x) x = nn.functional.relu(x) cur_fpn = self.adapter1(fpns[0]) if cur_fpn.size(0) != x.size(0): cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") x = self.lay3(x) x = self.gn3(x) x = nn.functional.relu(x) cur_fpn = self.adapter2(fpns[1]) if cur_fpn.size(0) != x.size(0): cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") x = self.lay4(x) x = self.gn4(x) x = nn.functional.relu(x) cur_fpn = self.adapter3(fpns[2]) if cur_fpn.size(0) != x.size(0): cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0)) x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest") x = self.lay5(x) x = self.gn5(x) x = nn.functional.relu(x) x = self.out_lay(x) return x # Copied from transformers.models.detr.modeling_detr.DetrMHAttentionMap with Detr->ConditionalDetr class ConditionalDetrMHAttentionMap(nn.Module): """This is a 2D attention module, which only returns the attention softmax (no multiplication by value)""" def __init__(self, query_dim, hidden_dim, num_heads, dropout=0.0, bias=True, std=None): super().__init__() self.num_heads = num_heads self.hidden_dim = hidden_dim self.dropout = nn.Dropout(dropout) self.q_linear = nn.Linear(query_dim, hidden_dim, bias=bias) self.k_linear = nn.Linear(query_dim, hidden_dim, bias=bias) self.normalize_fact = float(hidden_dim / self.num_heads) ** -0.5 def forward(self, q, k, mask: Optional[Tensor] = None): q = self.q_linear(q) k = nn.functional.conv2d(k, self.k_linear.weight.unsqueeze(-1).unsqueeze(-1), self.k_linear.bias) queries_per_head = q.view(q.shape[0], q.shape[1], self.num_heads, self.hidden_dim // self.num_heads) keys_per_head = k.view(k.shape[0], self.num_heads, self.hidden_dim // self.num_heads, k.shape[-2], k.shape[-1]) weights = torch.einsum("bqnc,bnchw->bqnhw", queries_per_head * self.normalize_fact, keys_per_head) if mask is not None: weights.masked_fill_(mask.unsqueeze(1).unsqueeze(1), torch.finfo(weights.dtype).min) weights = nn.functional.softmax(weights.flatten(2), dim=-1).view(weights.size()) weights = self.dropout(weights) return weights # Copied from transformers.models.detr.modeling_detr.dice_loss def dice_loss(inputs, targets, num_boxes): """ Compute the DICE loss, similar to generalized IOU for masks Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). """ inputs = inputs.sigmoid() inputs = inputs.flatten(1) numerator = 2 * (inputs * targets).sum(1) denominator = inputs.sum(-1) + targets.sum(-1) loss = 1 - (numerator + 1) / (denominator + 1) return loss.sum() / num_boxes # Copied from transformers.models.detr.modeling_detr.sigmoid_focal_loss def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2): """ Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. Args: inputs (`torch.FloatTensor` of arbitrary shape): The predictions for each example. 
targets (`torch.FloatTensor` with the same shape as `inputs`) A tensor storing the binary classification label for each element in the `inputs` (0 for the negative class and 1 for the positive class). alpha (`float`, *optional*, defaults to `0.25`): Optional weighting factor in the range (0,1) to balance positive vs. negative examples. gamma (`int`, *optional*, defaults to `2`): Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. Returns: Loss tensor """ prob = inputs.sigmoid() ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction="none") # add modulating factor p_t = prob * targets + (1 - prob) * (1 - targets) loss = ce_loss * ((1 - p_t) ** gamma) if alpha >= 0: alpha_t = alpha * targets + (1 - alpha) * (1 - targets) loss = alpha_t * loss return loss.mean(1).sum() / num_boxes class ConditionalDetrLoss(nn.Module): """ This class computes the losses for ConditionalDetrForObjectDetection/ConditionalDetrForSegmentation. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box). Args: matcher (`ConditionalDetrHungarianMatcher`): Module able to compute a matching between targets and proposals. num_classes (`int`): Number of object categories, omitting the special no-object category. focal_alpha (`float`): Alpha parameter in focal loss. losses (`List[str]`): List of all the losses to be applied. See `get_loss` for a list of all available losses. """ # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.__init__ def __init__(self, matcher, num_classes, focal_alpha, losses): super().__init__() self.matcher = matcher self.num_classes = num_classes self.focal_alpha = focal_alpha self.losses = losses # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.loss_labels def loss_labels(self, outputs, targets, indices, num_boxes): """ Classification loss (Binary focal loss) targets dicts must contain the key "class_labels" containing a tensor of dim [nb_target_boxes] """ if "logits" not in outputs: raise KeyError("No logits were found in the outputs") source_logits = outputs["logits"] idx = self._get_source_permutation_idx(indices) target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full( source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device ) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros( [source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1], dtype=source_logits.dtype, layout=source_logits.layout, device=source_logits.device, ) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:, :, :-1] loss_ce = ( sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * source_logits.shape[1] ) losses = {"loss_ce": loss_ce} return losses @torch.no_grad() # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.loss_cardinality def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes. This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients. 
""" logits = outputs["logits"] device = logits.device target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1) card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float()) losses = {"cardinality_error": card_err} return losses # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.loss_boxes def loss_boxes(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss. Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. """ if "pred_boxes" not in outputs: raise KeyError("No predicted boxes found in outputs") idx = self._get_source_permutation_idx(indices) source_boxes = outputs["pred_boxes"][idx] target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction="none") losses = {} losses["loss_bbox"] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag( generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes)) ) losses["loss_giou"] = loss_giou.sum() / num_boxes return losses # Copied from transformers.models.detr.modeling_detr.DetrLoss.loss_masks def loss_masks(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the masks: the focal loss and the dice loss. Targets dicts must contain the key "masks" containing a tensor of dim [nb_target_boxes, h, w]. 
""" if "pred_masks" not in outputs: raise KeyError("No predicted masks found in outputs") source_idx = self._get_source_permutation_idx(indices) target_idx = self._get_target_permutation_idx(indices) source_masks = outputs["pred_masks"] source_masks = source_masks[source_idx] masks = [t["masks"] for t in targets] # TODO use valid to mask invalid areas due to padding in loss target_masks, valid = nested_tensor_from_tensor_list(masks).decompose() target_masks = target_masks.to(source_masks) target_masks = target_masks[target_idx] # upsample predictions to the target size source_masks = nn.functional.interpolate( source_masks[:, None], size=target_masks.shape[-2:], mode="bilinear", align_corners=False ) source_masks = source_masks[:, 0].flatten(1) target_masks = target_masks.flatten(1) target_masks = target_masks.view(source_masks.shape) losses = { "loss_mask": sigmoid_focal_loss(source_masks, target_masks, num_boxes), "loss_dice": dice_loss(source_masks, target_masks, num_boxes), } return losses # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss._get_source_permutation_idx def _get_source_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)]) source_idx = torch.cat([source for (source, _) in indices]) return batch_idx, source_idx # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss._get_target_permutation_idx def _get_target_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)]) target_idx = torch.cat([target for (_, target) in indices]) return batch_idx, target_idx # Copied from transformers.models.detr.modeling_detr.DetrLoss.get_loss def get_loss(self, loss, outputs, targets, indices, num_boxes): loss_map = { "labels": self.loss_labels, "cardinality": self.loss_cardinality, "boxes": self.loss_boxes, "masks": self.loss_masks, } if loss not in loss_map: raise ValueError(f"Loss {loss} not supported") return loss_map[loss](outputs, targets, indices, num_boxes) # Copied from transformers.models.detr.modeling_detr.DetrLoss.forward def forward(self, outputs, targets): """ This performs the loss computation. Args: outputs (`dict`, *optional*): Dictionary of tensors, see the output specification of the model for the format. targets (`List[dict]`, *optional*): List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the losses applied, see each loss' doc. 
""" outputs_without_aux = {k: v for k, v in outputs.items() if k != "auxiliary_outputs"} # Retrieve the matching between the outputs of the last layer and the targets indices = self.matcher(outputs_without_aux, targets) # Compute the average number of target boxes across all nodes, for normalization purposes num_boxes = sum(len(t["class_labels"]) for t in targets) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) # (Niels): comment out function below, distributed training to be added # if is_dist_avail_and_initialized(): # torch.distributed.all_reduce(num_boxes) # (Niels) in original implementation, num_boxes is divided by get_world_size() num_boxes = torch.clamp(num_boxes, min=1).item() # Compute all the requested losses losses = {} for loss in self.losses: losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. if "auxiliary_outputs" in outputs: for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]): indices = self.matcher(auxiliary_outputs, targets) for loss in self.losses: if loss == "masks": # Intermediate masks losses are too costly to compute, we ignore them. continue l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes) l_dict = {k + f"_{i}": v for k, v in l_dict.items()} losses.update(l_dict) return losses # Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead with Detr->ConditionalDetr class ConditionalDetrMLPPredictionHead(nn.Module): """ Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates, height and width of a bounding box w.r.t. an image. Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py """ def __init__(self, input_dim, hidden_dim, output_dim, num_layers): super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) def forward(self, x): for i, layer in enumerate(self.layers): x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrHungarianMatcher with DeformableDetr->ConditionalDetr class ConditionalDetrHungarianMatcher(nn.Module): """ This class computes an assignment between the targets and the predictions of the network. For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are un-matched (and thus treated as non-objects). Args: class_cost: The relative weight of the classification error in the matching cost. bbox_cost: The relative weight of the L1 error of the bounding box coordinates in the matching cost. giou_cost: The relative weight of the giou loss of the bounding box in the matching cost. 
""" def __init__(self, class_cost: float = 1, bbox_cost: float = 1, giou_cost: float = 1): super().__init__() requires_backends(self, ["scipy"]) self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost if class_cost == 0 and bbox_cost == 0 and giou_cost == 0: raise ValueError("All costs of the Matcher can't be 0") @torch.no_grad() def forward(self, outputs, targets): """ Args: outputs (`dict`): A dictionary that contains at least these entries: * "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits * "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates. targets (`List[dict]`): A list of targets (len(targets) = batch_size), where each target is a dict containing: * "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels * "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates. Returns: `List[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where: - index_i is the indices of the selected predictions (in order) - index_j is the indices of the corresponding selected targets (in order) For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes) """ batch_size, num_queries = outputs["logits"].shape[:2] # We flatten to compute the cost matrices in a batch out_prob = outputs["logits"].flatten(0, 1).sigmoid() # [batch_size * num_queries, num_classes] out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] # Also concat the target labels and boxes target_ids = torch.cat([v["class_labels"] for v in targets]) target_bbox = torch.cat([v["boxes"] for v in targets]) # Compute the classification cost. alpha = 0.25 gamma = 2.0 neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log()) pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log()) class_cost = pos_cost_class[:, target_ids] - neg_cost_class[:, target_ids] # Compute the L1 cost between boxes bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) # Compute the giou cost between boxes giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) # Final cost matrix cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu() sizes = [len(v["boxes"]) for v in targets] indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))] return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] # Copied from transformers.models.detr.modeling_detr._upcast def _upcast(t: Tensor) -> Tensor: # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type if t.is_floating_point(): return t if t.dtype in (torch.float32, torch.float64) else t.float() else: return t if t.dtype in (torch.int32, torch.int64) else t.int() # Copied from transformers.models.detr.modeling_detr.box_area def box_area(boxes: Tensor) -> Tensor: """ Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates. Args: boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`): Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1 < x2` and `0 <= y1 < y2`. 
Returns: `torch.FloatTensor`: a tensor containing the area for each box. """ boxes = _upcast(boxes) return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) # Copied from transformers.models.detr.modeling_detr.box_iou def box_iou(boxes1, boxes2): area1 = box_area(boxes1) area2 = box_area(boxes2) left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] width_height = (right_bottom - left_top).clamp(min=0) # [N,M,2] inter = width_height[:, :, 0] * width_height[:, :, 1] # [N,M] union = area1[:, None] + area2 - inter iou = inter / union return iou, union # Copied from transformers.models.detr.modeling_detr.generalized_box_iou def generalized_box_iou(boxes1, boxes2): """ Generalized IoU from https://giou.stanford.edu/. The boxes should be in [x0, y0, x1, y1] (corner) format. Returns: `torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2) """ # degenerate boxes gives inf / nan results # so do an early check if not (boxes1[:, 2:] >= boxes1[:, :2]).all(): raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}") if not (boxes2[:, 2:] >= boxes2[:, :2]).all(): raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}") iou, union = box_iou(boxes1, boxes2) top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2]) bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) width_height = (bottom_right - top_left).clamp(min=0) # [N,M,2] area = width_height[:, :, 0] * width_height[:, :, 1] return iou - (area - union) / area # Copied from transformers.models.detr.modeling_detr._max_by_axis def _max_by_axis(the_list): # type: (List[List[int]]) -> List[int] maxes = the_list[0] for sublist in the_list[1:]: for index, item in enumerate(sublist): maxes[index] = max(maxes[index], item) return maxes # Copied from transformers.models.detr.modeling_detr.NestedTensor class NestedTensor(object): def __init__(self, tensors, mask: Optional[Tensor]): self.tensors = tensors self.mask = mask def to(self, device): cast_tensor = self.tensors.to(device) mask = self.mask if mask is not None: cast_mask = mask.to(device) else: cast_mask = None return NestedTensor(cast_tensor, cast_mask) def decompose(self): return self.tensors, self.mask def __repr__(self): return str(self.tensors) # Copied from transformers.models.detr.modeling_detr.nested_tensor_from_tensor_list def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): if tensor_list[0].ndim == 3: max_size = _max_by_axis([list(img.shape) for img in tensor_list]) batch_shape = [len(tensor_list)] + max_size batch_size, num_channels, height, width = batch_shape dtype = tensor_list[0].dtype device = tensor_list[0].device tensor = torch.zeros(batch_shape, dtype=dtype, device=device) mask = torch.ones((batch_size, height, width), dtype=torch.bool, device=device) for img, pad_img, m in zip(tensor_list, tensor, mask): pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) m[: img.shape[1], : img.shape[2]] = False else: raise ValueError("Only 3-dimensional tensors are supported") return NestedTensor(tensor, mask)
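# A minimal usage sketch (added for illustration only, not part of the original module; the tensor sizes are made up):
# nested_tensor_from_tensor_list pads a list of variable-sized (num_channels, height, width) images to a common
# spatial size and returns a NestedTensor whose mask is True at padded positions.
#
#   import torch
#   images = [torch.rand(3, 480, 640), torch.rand(3, 512, 512)]
#   nested = nested_tensor_from_tensor_list(images)
#   tensors, mask = nested.decompose()
#   # tensors.shape == (2, 3, 512, 640); mask.shape == (2, 512, 640), True where pixels are padding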
1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div` instead.
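As a quick illustration of the change described above (a hedged sketch with made-up tensor values, not code taken from this PR), integer floor division on tensors moves from the `//` operator to an explicit `torch.div` call with `rounding_mode="floor"`:

```python
import torch

lengths = torch.tensor([7, 9, 12])
block_size = 4

# Deprecated pattern that triggers the __floordiv__ warning on affected PyTorch versions:
# num_blocks = lengths // block_size

# Replacement recommended by the deprecation message:
num_blocks = torch.div(lengths, block_size, rounding_mode="floor")
print(num_blocks)  # tensor([1, 2, 3])
```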
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/deformable_detr/image_processing_deformable_detr.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Deformable DETR.""" import io import pathlib import warnings from collections import defaultdict from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union import numpy as np from transformers.feature_extraction_utils import BatchFeature from transformers.image_processing_utils import BaseImageProcessor, get_size_dict from transformers.image_transforms import ( PaddingMode, center_to_corners_format, corners_to_center_format, id_to_rgb, normalize, pad, rescale, resize, rgb_to_id, to_channel_dimension_format, ) from transformers.image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_coco_detection_annotations, valid_coco_panoptic_annotations, valid_images, ) from transformers.utils import ( is_flax_available, is_jax_tensor, is_scipy_available, is_tf_available, is_tf_tensor, is_torch_available, is_torch_tensor, is_vision_available, ) from transformers.utils.generic import ExplicitEnum, TensorType if is_torch_available(): import torch from torch import nn if is_vision_available(): import PIL if is_scipy_available(): import scipy.special import scipy.stats AnnotationType = Dict[str, Union[int, str, List[Dict]]] class AnnotionFormat(ExplicitEnum): COCO_DETECTION = "coco_detection" COCO_PANOPTIC = "coco_panoptic" SUPPORTED_ANNOTATION_FORMATS = (AnnotionFormat.COCO_DETECTION, AnnotionFormat.COCO_PANOPTIC) # Copied from transformers.models.detr.image_processing_detr.get_size_with_aspect_ratio def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]: """ Computes the output image size given the input image size and the desired output size. Args: image_size (`Tuple[int, int]`): The input image size. size (`int`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. """ height, width = image_size if max_size is not None: min_original_size = float(min((height, width))) max_original_size = float(max((height, width))) if max_original_size / min_original_size * size > max_size: size = int(round(max_size * min_original_size / max_original_size)) if (height <= width and height == size) or (width <= height and width == size): return height, width if width < height: ow = size oh = int(size * height / width) else: oh = size ow = int(size * width / height) return (oh, ow) # Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size def get_resize_output_image_size( input_image: np.ndarray, size: Union[int, Tuple[int, int], List[int]], max_size: Optional[int] = None ) -> Tuple[int, int]: """ Computes the output image size given the input image size and the desired output size. If the desired output size is a tuple or list, the output image size is returned as is. 
If the desired output size is an integer, the output image size is computed by keeping the aspect ratio of the input image size. Args: image_size (`Tuple[int, int]`): The input image size. size (`int`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. """ image_size = get_image_size(input_image) if isinstance(size, (list, tuple)): return size return get_size_with_aspect_ratio(image_size, size, max_size) # Copied from transformers.models.detr.image_processing_detr.get_numpy_to_framework_fn def get_numpy_to_framework_fn(arr) -> Callable: """ Returns a function that converts a numpy array to the framework of the input array. Args: arr (`np.ndarray`): The array to convert. """ if isinstance(arr, np.ndarray): return np.array if is_tf_available() and is_tf_tensor(arr): import tensorflow as tf return tf.convert_to_tensor if is_torch_available() and is_torch_tensor(arr): import torch return torch.tensor if is_flax_available() and is_jax_tensor(arr): import jax.numpy as jnp return jnp.array raise ValueError(f"Cannot convert arrays of type {type(arr)}") # Copied from transformers.models.detr.image_processing_detr.safe_squeeze def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray: """ Squeezes an array, but only if the axis specified has dim 1. """ if axis is None: return arr.squeeze() try: return arr.squeeze(axis=axis) except ValueError: return arr # Copied from transformers.models.detr.image_processing_detr.normalize_annotation def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict: image_height, image_width = image_size norm_annotation = {} for key, value in annotation.items(): if key == "boxes": boxes = value boxes = corners_to_center_format(boxes) boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32) norm_annotation[key] = boxes else: norm_annotation[key] = value return norm_annotation # Copied from transformers.models.detr.image_processing_detr.max_across_indices def max_across_indices(values: Iterable[Any]) -> List[Any]: """ Return the maximum value across all indices of an iterable of values. """ return [max(values_i) for values_i in zip(*values)] # Copied from transformers.models.detr.image_processing_detr.get_max_height_width def get_max_height_width(images: List[np.ndarray]) -> List[int]: """ Get the maximum height and width across all images in a batch. """ input_channel_dimension = infer_channel_dimension_format(images[0]) if input_channel_dimension == ChannelDimension.FIRST: _, max_height, max_width = max_across_indices([img.shape for img in images]) elif input_channel_dimension == ChannelDimension.LAST: max_height, max_width, _ = max_across_indices([img.shape for img in images]) else: raise ValueError(f"Invalid channel dimension format: {input_channel_dimension}") return (max_height, max_width) # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int]) -> np.ndarray: """ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. Args: image (`np.ndarray`): Image to make the pixel mask for. output_size (`Tuple[int, int]`): Output size of the mask. 
""" input_height, input_width = get_image_size(image) mask = np.zeros(output_size, dtype=np.int64) mask[:input_height, :input_width] = 1 return mask # Copied from transformers.models.detr.image_processing_detr.convert_coco_poly_to_mask def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray: """ Convert a COCO polygon annotation to a mask. Args: segmentations (`List[List[float]]`): List of polygons, each polygon represented by a list of x-y coordinates. height (`int`): Height of the mask. width (`int`): Width of the mask. """ try: from pycocotools import mask as coco_mask except ImportError: raise ImportError("Pycocotools is not installed in your environment.") masks = [] for polygons in segmentations: rles = coco_mask.frPyObjects(polygons, height, width) mask = coco_mask.decode(rles) if len(mask.shape) < 3: mask = mask[..., None] mask = np.asarray(mask, dtype=np.uint8) mask = np.any(mask, axis=2) masks.append(mask) if masks: masks = np.stack(masks, axis=0) else: masks = np.zeros((0, height, width), dtype=np.uint8) return masks # Copied from transformers.models.detr.image_processing_detr.prepare_coco_detection_annotation with DETR->DeformableDetr def prepare_coco_detection_annotation(image, target, return_segmentation_masks: bool = False): """ Convert the target in COCO format into the format expected by DeformableDetr. """ image_height, image_width = get_image_size(image) image_id = target["image_id"] image_id = np.asarray([image_id], dtype=np.int64) # Get all COCO annotations for the given image. annotations = target["annotations"] annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0] classes = [obj["category_id"] for obj in annotations] classes = np.asarray(classes, dtype=np.int64) # for conversion to coco api area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32) iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64) boxes = [obj["bbox"] for obj in annotations] # guard against no boxes via resizing boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4) boxes[:, 2:] += boxes[:, :2] boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width) boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height) keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) new_target = {} new_target["image_id"] = image_id new_target["class_labels"] = classes[keep] new_target["boxes"] = boxes[keep] new_target["area"] = area[keep] new_target["iscrowd"] = iscrowd[keep] new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64) if annotations and "keypoints" in annotations[0]: keypoints = [obj["keypoints"] for obj in annotations] keypoints = np.asarray(keypoints, dtype=np.float32) num_keypoints = keypoints.shape[0] keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints new_target["keypoints"] = keypoints[keep] if return_segmentation_masks: segmentation_masks = [obj["segmentation"] for obj in annotations] masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width) new_target["masks"] = masks[keep] return new_target # Copied from transformers.models.detr.image_processing_detr.masks_to_boxes def masks_to_boxes(masks: np.ndarray) -> np.ndarray: """ Compute the bounding boxes around the provided panoptic segmentation masks. 
Args: masks: masks in format `[number_masks, height, width]` where N is the number of masks Returns: boxes: bounding boxes in format `[number_masks, 4]` in xyxy format """ if masks.size == 0: return np.zeros((0, 4)) h, w = masks.shape[-2:] y = np.arange(0, h, dtype=np.float32) x = np.arange(0, w, dtype=np.float32) # see https://github.com/pytorch/pytorch/issues/50276 y, x = np.meshgrid(y, x, indexing="ij") x_mask = masks * np.expand_dims(x, axis=0) x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) x_min = x.filled(fill_value=1e8) x_min = x_min.reshape(x_min.shape[0], -1).min(-1) y_mask = masks * np.expand_dims(y, axis=0) y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) y_min = y.filled(fill_value=1e8) y_min = y_min.reshape(y_min.shape[0], -1).min(-1) return np.stack([x_min, y_min, x_max, y_max], 1) # Copied from transformers.models.detr.image_processing_detr.prepare_coco_panoptic_annotation with DETR->DeformableDetr def prepare_coco_panoptic_annotation( image: np.ndarray, target: Dict, masks_path: Union[str, pathlib.Path], return_masks: bool = True ) -> Dict: """ Prepare a coco panoptic annotation for DeformableDetr. """ image_height, image_width = get_image_size(image) annotation_path = pathlib.Path(masks_path) / target["file_name"] new_target = {} new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64) new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64) new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64) if "segments_info" in target: masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32) masks = rgb_to_id(masks) ids = np.array([segment_info["id"] for segment_info in target["segments_info"]]) masks = masks == ids[:, None, None] masks = masks.astype(np.uint8) if return_masks: new_target["masks"] = masks new_target["boxes"] = masks_to_boxes(masks) new_target["class_labels"] = np.array( [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64 ) new_target["iscrowd"] = np.asarray( [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64 ) new_target["area"] = np.asarray( [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32 ) return new_target # Copied from transformers.models.detr.image_processing_detr.get_segmentation_image def get_segmentation_image( masks: np.ndarray, input_size: Tuple, target_size: Tuple, stuff_equiv_classes, deduplicate=False ): h, w = input_size final_h, final_w = target_size m_id = scipy.special.softmax(masks.transpose(0, 1), -1) if m_id.shape[-1] == 0: # We didn't detect any mask :( m_id = np.zeros((h, w), dtype=np.int64) else: m_id = m_id.argmax(-1).reshape(h, w) if deduplicate: # Merge the masks corresponding to the same stuff class for equiv in stuff_equiv_classes.values(): for eq_id in equiv: m_id[m_id == eq_id] = equiv[0] seg_img = id_to_rgb(m_id) seg_img = resize(seg_img, (final_w, final_h), resample=PILImageResampling.NEAREST) return seg_img # Copied from transformers.models.detr.image_processing_detr.get_mask_area def get_mask_area(seg_img: np.ndarray, target_size: Tuple[int, int], n_classes: int) -> np.ndarray: final_h, final_w = target_size np_seg_img = seg_img.astype(np.uint8) np_seg_img = np_seg_img.reshape(final_h, final_w, 3) m_id = rgb_to_id(np_seg_img) area = [(m_id == i).sum() for i in range(n_classes)] 
return area # Copied from transformers.models.detr.image_processing_detr.score_labels_from_class_probabilities def score_labels_from_class_probabilities(logits: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: probs = scipy.special.softmax(logits, axis=-1) labels = probs.argmax(-1, keepdims=True) scores = np.take_along_axis(probs, labels, axis=-1) scores, labels = scores.squeeze(-1), labels.squeeze(-1) return scores, labels # Copied from transformers.models.detr.image_processing_detr.post_process_panoptic_sample def post_process_panoptic_sample( out_logits: np.ndarray, masks: np.ndarray, boxes: np.ndarray, processed_size: Tuple[int, int], target_size: Tuple[int, int], is_thing_map: Dict, threshold=0.85, ) -> Dict: """ Converts the output of [`DetrForSegmentation`] into panoptic segmentation predictions for a single sample. Args: out_logits (`torch.Tensor`): The logits for this sample. masks (`torch.Tensor`): The predicted segmentation masks for this sample. boxes (`torch.Tensor`): The prediced bounding boxes for this sample. The boxes are in the normalized format `(center_x, center_y, width, height)` and values between `[0, 1]`, relative to the size the image (disregarding padding). processed_size (`Tuple[int, int]`): The processed size of the image `(height, width)`, as returned by the preprocessing step i.e. the size after data augmentation but before batching. target_size (`Tuple[int, int]`): The target size of the image, `(height, width)` corresponding to the requested final size of the prediction. is_thing_map (`Dict`): A dictionary mapping class indices to a boolean value indicating whether the class is a thing or not. threshold (`float`, *optional*, defaults to 0.85): The threshold used to binarize the segmentation masks. """ # we filter empty queries and detection below threshold scores, labels = score_labels_from_class_probabilities(out_logits) keep = (labels != out_logits.shape[-1] - 1) & (scores > threshold) cur_scores = scores[keep] cur_classes = labels[keep] cur_boxes = center_to_corners_format(boxes[keep]) if len(cur_boxes) != len(cur_classes): raise ValueError("Not as many boxes as there are classes") cur_masks = masks[keep] cur_masks = resize(cur_masks[:, None], processed_size, resample=PILImageResampling.BILINEAR) cur_masks = safe_squeeze(cur_masks, 1) b, h, w = cur_masks.shape # It may be that we have several predicted masks for the same stuff class. 
# In the following, we track the list of masks ids for each stuff class (they are merged later on) cur_masks = cur_masks.reshape(b, -1) stuff_equiv_classes = defaultdict(list) for k, label in enumerate(cur_classes): if not is_thing_map[label]: stuff_equiv_classes[label].append(k) seg_img = get_segmentation_image(cur_masks, processed_size, target_size, stuff_equiv_classes, deduplicate=True) area = get_mask_area(cur_masks, processed_size, n_classes=len(cur_scores)) # We filter out any mask that is too small if cur_classes.size() > 0: # We know filter empty masks as long as we find some filtered_small = np.array([a <= 4 for a in area], dtype=bool) while filtered_small.any(): cur_masks = cur_masks[~filtered_small] cur_scores = cur_scores[~filtered_small] cur_classes = cur_classes[~filtered_small] seg_img = get_segmentation_image(cur_masks, (h, w), target_size, stuff_equiv_classes, deduplicate=True) area = get_mask_area(seg_img, target_size, n_classes=len(cur_scores)) filtered_small = np.array([a <= 4 for a in area], dtype=bool) else: cur_classes = np.ones((1, 1), dtype=np.int64) segments_info = [ {"id": i, "isthing": is_thing_map[cat], "category_id": int(cat), "area": a} for i, (cat, a) in enumerate(zip(cur_classes, area)) ] del cur_classes with io.BytesIO() as out: PIL.Image.fromarray(seg_img).save(out, format="PNG") predictions = {"png_string": out.getvalue(), "segments_info": segments_info} return predictions # Copied from transformers.models.detr.image_processing_detr.resize_annotation def resize_annotation( annotation: Dict[str, Any], orig_size: Tuple[int, int], target_size: Tuple[int, int], threshold: float = 0.5, resample: PILImageResampling = PILImageResampling.NEAREST, ): """ Resizes an annotation to a target size. Args: annotation (`Dict[str, Any]`): The annotation dictionary. orig_size (`Tuple[int, int]`): The original size of the input image. target_size (`Tuple[int, int]`): The target size of the image, as returned by the preprocessing `resize` step. threshold (`float`, *optional*, defaults to 0.5): The threshold used to binarize the segmentation masks. resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`): The resampling filter to use when resizing the masks. """ ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size)) ratio_height, ratio_width = ratios new_annotation = {} new_annotation["size"] = target_size for key, value in annotation.items(): if key == "boxes": boxes = value scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32) new_annotation["boxes"] = scaled_boxes elif key == "area": area = value scaled_area = area * (ratio_width * ratio_height) new_annotation["area"] = scaled_area elif key == "masks": masks = value[:, None] masks = np.array([resize(mask, target_size, resample=resample) for mask in masks]) masks = masks.astype(np.float32) masks = masks[:, 0] > threshold new_annotation["masks"] = masks elif key == "size": new_annotation["size"] = target_size else: new_annotation[key] = value return new_annotation # Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle def binary_mask_to_rle(mask): """ Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format. Args: mask (`torch.Tensor` or `numpy.array`): A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target segment_id or class_id. Returns: `List`: Run-length encoded list of the binary mask. 
Refer to COCO API for more information about the RLE format. """ if is_torch_tensor(mask): mask = mask.numpy() pixels = mask.flatten() pixels = np.concatenate([[0], pixels, [0]]) runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 runs[1::2] -= runs[::2] return list(runs) # Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle def convert_segmentation_to_rle(segmentation): """ Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format. Args: segmentation (`torch.Tensor` or `numpy.array`): A segmentation map of shape `(height, width)` where each value denotes a segment or class id. Returns: `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id. """ segment_ids = torch.unique(segmentation) run_length_encodings = [] for idx in segment_ids: mask = torch.where(segmentation == idx, 1, 0) rle = binary_mask_to_rle(mask) run_length_encodings.append(rle) return run_length_encodings # Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): """ Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and `labels`. Args: masks (`torch.Tensor`): A tensor of shape `(num_queries, height, width)`. scores (`torch.Tensor`): A tensor of shape `(num_queries)`. labels (`torch.Tensor`): A tensor of shape `(num_queries)`. object_mask_threshold (`float`): A number between 0 and 1 used to binarize the masks. Raises: `ValueError`: Raised when the first dimension doesn't match in all input tensors. Returns: `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region < `object_mask_threshold`. 
""" if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): raise ValueError("mask, scores and labels must have the same shape!") to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) return masks[to_keep], scores[to_keep], labels[to_keep] # Copied from transformers.models.detr.image_processing_detr.check_segment_validity def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8): # Get the mask associated with the k class mask_k = mask_labels == k mask_k_area = mask_k.sum() # Compute the area of all the stuff in query k original_area = (mask_probs[k] >= mask_threshold).sum() mask_exists = mask_k_area > 0 and original_area > 0 # Eliminate disconnected tiny segments if mask_exists: area_ratio = mask_k_area / original_area if not area_ratio.item() > overlap_mask_area_threshold: mask_exists = False return mask_exists, mask_k # Copied from transformers.models.detr.image_processing_detr.compute_segments def compute_segments( mask_probs, pred_scores, pred_labels, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_size: Tuple[int, int] = None, ): height = mask_probs.shape[1] if target_size is None else target_size[0] width = mask_probs.shape[2] if target_size is None else target_size[1] segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) segments: List[Dict] = [] if target_size is not None: mask_probs = nn.functional.interpolate( mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False )[0] current_segment_id = 0 # Weigh each mask by its prediction score mask_probs *= pred_scores.view(-1, 1, 1) mask_labels = mask_probs.argmax(0) # [height, width] # Keep track of instances of each class stuff_memory_list: Dict[str, int] = {} for k in range(pred_labels.shape[0]): pred_class = pred_labels[k].item() should_fuse = pred_class in label_ids_to_fuse # Check if mask exists and large enough to be a segment mask_exists, mask_k = check_segment_validity( mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold ) if mask_exists: if pred_class in stuff_memory_list: current_segment_id = stuff_memory_list[pred_class] else: current_segment_id += 1 # Add current object segment to final segmentation map segmentation[mask_k] = current_segment_id segment_score = round(pred_scores[k].item(), 6) segments.append( { "id": current_segment_id, "label_id": pred_class, "was_fused": should_fuse, "score": segment_score, } ) if should_fuse: stuff_memory_list[pred_class] = current_segment_id return segmentation, segments class DeformableDetrImageProcessor(BaseImageProcessor): r""" Constructs a Deformable DETR image processor. Args: format (`str`, *optional*, defaults to `"coco_detection"`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". do_resize (`bool`, *optional*, defaults to `True`): Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`): Size of the image's (height, width) dimensions after resizing. Can be overridden by the `size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use if resizing the image. 
do_rescale (`bool`, *optional*, defaults to `True`): Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize: Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): Mean values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method. do_pad (`bool`, *optional*, defaults to `True`): Controls whether to pad the image to the largest image in a batch and create a pixel mask. Can be overridden by the `do_pad` parameter in the `preprocess` method. """ model_input_names = ["pixel_values", "pixel_mask"] # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.__init__ def __init__( self, format: Union[str, AnnotionFormat] = AnnotionFormat.COCO_DETECTION, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Union[float, List[float]] = None, image_std: Union[float, List[float]] = None, do_pad: bool = True, **kwargs, ) -> None: if "pad_and_return_pixel_mask" in kwargs: do_pad = kwargs.pop("pad_and_return_pixel_mask") if "max_size" in kwargs: warnings.warn( "The `max_size` parameter is deprecated and will be removed in v4.26. " "Please specify in `size['longest_edge'] instead`.", FutureWarning, ) max_size = kwargs.pop("max_size") else: max_size = None if size is None else 1333 size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333} size = get_size_dict(size, max_size=max_size, default_to_square=False) super().__init__(**kwargs) self.format = format self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.do_pad = do_pad @property # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.max_size def max_size(self): warnings.warn( "The `max_size` parameter is deprecated and will be removed in v4.27. " "Please specify in `size['longest_edge'] instead`.", FutureWarning, ) return self.size["longest_edge"] @classmethod # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.from_dict with Detr->DeformableDetr def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs): """ Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is created using from_dict and kwargs e.g. 
`DeformableDetrImageProcessor.from_pretrained(checkpoint, size=600, max_size=800)` """ image_processor_dict = image_processor_dict.copy() if "max_size" in kwargs: image_processor_dict["max_size"] = kwargs.pop("max_size") if "pad_and_return_pixel_mask" in kwargs: image_processor_dict["pad_and_return_pixel_mask"] = kwargs.pop("pad_and_return_pixel_mask") return super().from_dict(image_processor_dict, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation with DETR->DeformableDetr def prepare_annotation( self, image: np.ndarray, target: Dict, format: Optional[AnnotionFormat] = None, return_segmentation_masks: bool = None, masks_path: Optional[Union[str, pathlib.Path]] = None, ) -> Dict: """ Prepare an annotation for feeding into DeformableDetr model. """ format = format if format is not None else self.format if format == AnnotionFormat.COCO_DETECTION: return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_detection_annotation(image, target, return_segmentation_masks) elif format == AnnotionFormat.COCO_PANOPTIC: return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_panoptic_annotation( image, target, masks_path=masks_path, return_masks=return_segmentation_masks ) else: raise ValueError(f"Format {format} is not supported.") return target # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare def prepare(self, image, target, return_segmentation_masks=None, masks_path=None): warnings.warn( "The `prepare` method is deprecated and will be removed in a future version. " "Please use `prepare_annotation` instead. Note: the `prepare_annotation` method " "does not return the image anymore.", ) target = self.prepare_annotation(image, target, return_segmentation_masks, masks_path, self.format) return image, target # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.convert_coco_poly_to_mask def convert_coco_poly_to_mask(self, *args, **kwargs): warnings.warn("The `convert_coco_poly_to_mask` method is deprecated and will be removed in a future version. ") return convert_coco_poly_to_mask(*args, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_detection def prepare_coco_detection(self, *args, **kwargs): warnings.warn("The `prepare_coco_detection` method is deprecated and will be removed in a future version. ") return prepare_coco_detection_annotation(*args, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_panoptic def prepare_coco_panoptic(self, *args, **kwargs): warnings.warn("The `prepare_coco_panoptic` method is deprecated and will be removed in a future version. ") return prepare_coco_panoptic_annotation(*args, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension] = None, **kwargs, ) -> np.ndarray: """ Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an int, smaller edge of the image will be matched to this number. """ if "max_size" in kwargs: warnings.warn( "The `max_size` parameter is deprecated and will be removed in v4.26. 
" "Please specify in `size['longest_edge'] instead`.", FutureWarning, ) max_size = kwargs.pop("max_size") else: max_size = None size = get_size_dict(size, max_size=max_size, default_to_square=False) if "shortest_edge" in size and "longest_edge" in size: size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: size = (size["height"], size["width"]) else: raise ValueError( "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got" f" {size.keys()}." ) image = resize(image, size=size, resample=resample, data_format=data_format) return image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation def resize_annotation( self, annotation, orig_size, size, resample: PILImageResampling = PILImageResampling.NEAREST, ) -> Dict: """ Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched to this number. """ return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale def rescale( self, image: np.ndarray, rescale_factor: Union[float, int], data_format: Optional[ChannelDimension] = None ) -> np.ndarray: """ Rescale the image by the given factor. """ return rescale(image, rescale_factor, data_format=data_format) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize def normalize( self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Normalize the image with the given mean and standard deviation. """ return normalize(image, mean=mean, std=std, data_format=data_format) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict: """ Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to `[center_x, center_y, width, height]` format. """ return normalize_annotation(annotation, image_size=image_size) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad_and_create_pixel_mask def pad_and_create_pixel_mask( self, pixel_values_list: List[ImageInput], return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, ) -> BatchFeature: """ Pads a batch of images with zeros to the size of largest height and width in the batch and returns their corresponding pixel mask. Args: images (`List[np.ndarray]`): Batch of images to pad. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ warnings.warn( "This method is deprecated and will be removed in v4.27.0. 
Please use pad instead.", FutureWarning ) # pad expects a list of np.ndarray, but the previous feature extractors expected torch tensors images = [to_numpy_array(image) for image in pixel_values_list] return self.pad( images=images, return_pixel_mask=True, return_tensors=return_tensors, data_format=data_format, ) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image def _pad_image( self, image: np.ndarray, output_size: Tuple[int, int], constant_values: Union[float, Iterable[float]] = 0, data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Pad an image with zeros to the given size. """ input_height, input_width = get_image_size(image) output_height, output_width = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad( image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format ) return padded_image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad def pad( self, images: List[np.ndarray], constant_values: Union[float, Iterable[float]] = 0, return_pixel_mask: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width in the batch and optionally returns their corresponding pixel mask. Args: image (`np.ndarray`): Image to pad. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether to return a pixel mask. input_channel_dimension (`ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be inferred from the input image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ pad_size = get_max_height_width(images) padded_images = [ self._pad_image(image, pad_size, constant_values=constant_values, data_format=data_format) for image in images ] data = {"pixel_values": padded_images} if return_pixel_mask: masks = [make_pixel_mask(image=image, output_size=pad_size) for image in images] data["pixel_mask"] = masks return BatchFeature(data=data, tensor_type=return_tensors) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.preprocess def preprocess( self, images: ImageInput, annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None, return_segmentation_masks: bool = None, masks_path: Optional[Union[str, pathlib.Path]] = None, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample=None, # PILImageResampling do_rescale: Optional[bool] = None, rescale_factor: Optional[Union[int, float]] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: Optional[bool] = None, format: Optional[Union[str, AnnotionFormat]] = None, return_tensors: Optional[Union[TensorType, str]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs, ) -> BatchFeature: """ Preprocess an image or a batch of images so that it can be used by the model. Args: images (`ImageInput`): Image or batch of images to preprocess. 
annotations (`AnnotationType` or `List[AnnotationType]`, *optional*): List of annotations associated with the image or batch of images. If annotionation is for object detection, the annotations should be a dictionary with the following keys: - "image_id" (`int`): The image id. - "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a dictionary. An image can have no annotations, in which case the list should be empty. If annotionation is for segmentation, the annotations should be a dictionary with the following keys: - "image_id" (`int`): The image id. - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary. An image can have no segments, in which case the list should be empty. - "file_name" (`str`): The file name of the image. return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks): Whether to return segmentation masks. masks_path (`str` or `pathlib.Path`, *optional*): Path to the directory containing the segmentation masks. do_resize (`bool`, *optional*, defaults to self.do_resize): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to self.size): Size of the image after resizing. resample (`PILImageResampling`, *optional*, defaults to self.resample): Resampling filter to use when resizing the image. do_rescale (`bool`, *optional*, defaults to self.do_rescale): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to self.rescale_factor): Rescale factor to use when rescaling the image. do_normalize (`bool`, *optional*, defaults to self.do_normalize): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean): Mean to use when normalizing the image. image_std (`float` or `List[float]`, *optional*, defaults to self.image_std): Standard deviation to use when normalizing the image. do_pad (`bool`, *optional*, defaults to self.do_pad): Whether to pad the image. format (`str` or `AnnotionFormat`, *optional*, defaults to self.format): Format of the annotations. return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors): Type of tensors to return. If `None`, will return the list of images. data_format (`str` or `ChannelDimension`, *optional*, defaults to self.data_format): The channel dimension format of the image. If not provided, it will be the same as the input image. 
""" if "pad_and_return_pixel_mask" in kwargs: warnings.warn( "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, " "use `do_pad` instead.", FutureWarning, ) do_pad = kwargs.pop("pad_and_return_pixel_mask") max_size = None if "max_size" in kwargs: warnings.warn( "The `max_size` argument is deprecated and will be removed in a future version, use" " `size['longest_edge']` instead.", FutureWarning, ) size = kwargs.pop("max_size") do_resize = self.do_resize if do_resize is None else do_resize size = self.size if size is None else size size = get_size_dict(size=size, max_size=max_size, default_to_square=False) resample = self.resample if resample is None else resample do_rescale = self.do_rescale if do_rescale is None else do_rescale rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor do_normalize = self.do_normalize if do_normalize is None else do_normalize image_mean = self.image_mean if image_mean is None else image_mean image_std = self.image_std if image_std is None else image_std do_pad = self.do_pad if do_pad is None else do_pad format = self.format if format is None else format if do_resize is not None and size is None: raise ValueError("Size and max_size must be specified if do_resize is True.") if do_rescale is not None and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize is not None and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") images = make_list_of_images(images) if annotations is not None and isinstance(annotations, dict): annotations = [annotations] if annotations is not None and len(images) != len(annotations): raise ValueError( f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match." ) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) format = AnnotionFormat(format) if annotations is not None: if format == AnnotionFormat.COCO_DETECTION and not valid_coco_detection_annotations(annotations): raise ValueError( "Invalid COCO detection annotations. Annotations must a dict (single image) of list of dicts" "(batch of images) with the following keys: `image_id` and `annotations`, with the latter " "being a list of annotations in the COCO format." ) elif format == AnnotionFormat.COCO_PANOPTIC and not valid_coco_panoptic_annotations(annotations): raise ValueError( "Invalid COCO panoptic annotations. Annotations must a dict (single image) of list of dicts " "(batch of images) with the following keys: `image_id`, `file_name` and `segments_info`, with " "the latter being a list of annotations in the COCO format." ) elif format not in SUPPORTED_ANNOTATION_FORMATS: raise ValueError( f"Unsupported annotation format: {format} must be one of {SUPPORTED_ANNOTATION_FORMATS}" ) if ( masks_path is not None and format == AnnotionFormat.COCO_PANOPTIC and not isinstance(masks_path, (pathlib.Path, str)) ): raise ValueError( "The path to the directory containing the mask PNG files should be provided as a" f" `pathlib.Path` or string object, but is {type(masks_path)} instead." 
) # All transformations expect numpy arrays images = [to_numpy_array(image) for image in images] # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image) if annotations is not None: prepared_images = [] prepared_annotations = [] for image, target in zip(images, annotations): target = self.prepare_annotation( image, target, format, return_segmentation_masks=return_segmentation_masks, masks_path=masks_path ) prepared_images.append(image) prepared_annotations.append(target) images = prepared_images annotations = prepared_annotations del prepared_images, prepared_annotations # transformations if do_resize: if annotations is not None: resized_images, resized_annotations = [], [] for image, target in zip(images, annotations): orig_size = get_image_size(image) resized_image = self.resize(image, size=size, max_size=max_size, resample=resample) resized_annotation = self.resize_annotation(target, orig_size, get_image_size(resized_image)) resized_images.append(resized_image) resized_annotations.append(resized_annotation) images = resized_images annotations = resized_annotations del resized_images, resized_annotations else: images = [self.resize(image, size=size, resample=resample) for image in images] if do_rescale: images = [self.rescale(image, rescale_factor) for image in images] if do_normalize: images = [self.normalize(image, image_mean, image_std) for image in images] if annotations is not None: annotations = [ self.normalize_annotation(annotation, get_image_size(image)) for annotation, image in zip(annotations, images) ] if do_pad: # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...} data = self.pad(images, return_pixel_mask=True, data_format=data_format) else: images = [to_channel_dimension_format(image, data_format) for image in images] data = {"pixel_values": images} encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) if annotations is not None: encoded_inputs["labels"] = [ BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations ] return encoded_inputs # POSTPROCESSING METHODS - TODO: add support for other frameworks def post_process(self, outputs, target_sizes): """ Converts the raw output of [`DeformableDetrForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch. Args: outputs ([`DeformableDetrObjectDetectionOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): Tensor containing the size (height, width) of each image of the batch. For evaluation, this must be the original image size (before any data augmentation). For visualization, this should be the image size after data augment, but before padding. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. 
""" warnings.warn( "`post_process` is deprecated and will be removed in v5 of Transformers, please use" " `post_process_object_detection`.", FutureWarning, ) out_logits, out_bbox = outputs.logits, outputs.pred_boxes if len(out_logits) != len(target_sizes): raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits") if target_sizes.shape[1] != 2: raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") prob = out_logits.sigmoid() topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1) scores = topk_values topk_boxes = topk_indexes // out_logits.shape[2] labels = topk_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) # and from relative [0, 1] to absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)] return results def post_process_object_detection( self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None ): """ Converts the raw output of [`DeformableDetrForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch. Args: outputs ([`DetrObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. """ out_logits, out_bbox = outputs.logits, outputs.pred_boxes if target_sizes is not None: if len(out_logits) != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) prob = out_logits.sigmoid() topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1) scores = topk_values topk_boxes = topk_indexes // out_logits.shape[2] labels = topk_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) # and from relative [0, 1] to absolute [0, height] coordinates if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] for s, l, b in zip(scores, labels, boxes): score = s[s > threshold] label = l[s > threshold] box = b[s > threshold] results.append({"scores": score, "labels": label, "boxes": box}) return results
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Deformable DETR.""" import io import pathlib import warnings from collections import defaultdict from typing import Any, Callable, Dict, Iterable, List, Optional, Set, Tuple, Union import numpy as np from transformers.feature_extraction_utils import BatchFeature from transformers.image_processing_utils import BaseImageProcessor, get_size_dict from transformers.image_transforms import ( PaddingMode, center_to_corners_format, corners_to_center_format, id_to_rgb, normalize, pad, rescale, resize, rgb_to_id, to_channel_dimension_format, ) from transformers.image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, make_list_of_images, to_numpy_array, valid_coco_detection_annotations, valid_coco_panoptic_annotations, valid_images, ) from transformers.utils import ( is_flax_available, is_jax_tensor, is_scipy_available, is_tf_available, is_tf_tensor, is_torch_available, is_torch_tensor, is_vision_available, ) from transformers.utils.generic import ExplicitEnum, TensorType if is_torch_available(): import torch from torch import nn from ...pytorch_utils import torch_int_div if is_vision_available(): import PIL if is_scipy_available(): import scipy.special import scipy.stats AnnotationType = Dict[str, Union[int, str, List[Dict]]] class AnnotionFormat(ExplicitEnum): COCO_DETECTION = "coco_detection" COCO_PANOPTIC = "coco_panoptic" SUPPORTED_ANNOTATION_FORMATS = (AnnotionFormat.COCO_DETECTION, AnnotionFormat.COCO_PANOPTIC) # Copied from transformers.models.detr.image_processing_detr.get_size_with_aspect_ratio def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]: """ Computes the output image size given the input image size and the desired output size. Args: image_size (`Tuple[int, int]`): The input image size. size (`int`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. """ height, width = image_size if max_size is not None: min_original_size = float(min((height, width))) max_original_size = float(max((height, width))) if max_original_size / min_original_size * size > max_size: size = int(round(max_size * min_original_size / max_original_size)) if (height <= width and height == size) or (width <= height and width == size): return height, width if width < height: ow = size oh = int(size * height / width) else: oh = size ow = int(size * width / height) return (oh, ow) # Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size def get_resize_output_image_size( input_image: np.ndarray, size: Union[int, Tuple[int, int], List[int]], max_size: Optional[int] = None ) -> Tuple[int, int]: """ Computes the output image size given the input image size and the desired output size. If the desired output size is a tuple or list, the output image size is returned as is. 
If the desired output size is an integer, the output image size is computed by keeping the aspect ratio of the input image size. Args: image_size (`Tuple[int, int]`): The input image size. size (`int`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. """ image_size = get_image_size(input_image) if isinstance(size, (list, tuple)): return size return get_size_with_aspect_ratio(image_size, size, max_size) # Copied from transformers.models.detr.image_processing_detr.get_numpy_to_framework_fn def get_numpy_to_framework_fn(arr) -> Callable: """ Returns a function that converts a numpy array to the framework of the input array. Args: arr (`np.ndarray`): The array to convert. """ if isinstance(arr, np.ndarray): return np.array if is_tf_available() and is_tf_tensor(arr): import tensorflow as tf return tf.convert_to_tensor if is_torch_available() and is_torch_tensor(arr): import torch return torch.tensor if is_flax_available() and is_jax_tensor(arr): import jax.numpy as jnp return jnp.array raise ValueError(f"Cannot convert arrays of type {type(arr)}") # Copied from transformers.models.detr.image_processing_detr.safe_squeeze def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray: """ Squeezes an array, but only if the axis specified has dim 1. """ if axis is None: return arr.squeeze() try: return arr.squeeze(axis=axis) except ValueError: return arr # Copied from transformers.models.detr.image_processing_detr.normalize_annotation def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict: image_height, image_width = image_size norm_annotation = {} for key, value in annotation.items(): if key == "boxes": boxes = value boxes = corners_to_center_format(boxes) boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32) norm_annotation[key] = boxes else: norm_annotation[key] = value return norm_annotation # Copied from transformers.models.detr.image_processing_detr.max_across_indices def max_across_indices(values: Iterable[Any]) -> List[Any]: """ Return the maximum value across all indices of an iterable of values. """ return [max(values_i) for values_i in zip(*values)] # Copied from transformers.models.detr.image_processing_detr.get_max_height_width def get_max_height_width(images: List[np.ndarray]) -> List[int]: """ Get the maximum height and width across all images in a batch. """ input_channel_dimension = infer_channel_dimension_format(images[0]) if input_channel_dimension == ChannelDimension.FIRST: _, max_height, max_width = max_across_indices([img.shape for img in images]) elif input_channel_dimension == ChannelDimension.LAST: max_height, max_width, _ = max_across_indices([img.shape for img in images]) else: raise ValueError(f"Invalid channel dimension format: {input_channel_dimension}") return (max_height, max_width) # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int]) -> np.ndarray: """ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. Args: image (`np.ndarray`): Image to make the pixel mask for. output_size (`Tuple[int, int]`): Output size of the mask. 
""" input_height, input_width = get_image_size(image) mask = np.zeros(output_size, dtype=np.int64) mask[:input_height, :input_width] = 1 return mask # Copied from transformers.models.detr.image_processing_detr.convert_coco_poly_to_mask def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray: """ Convert a COCO polygon annotation to a mask. Args: segmentations (`List[List[float]]`): List of polygons, each polygon represented by a list of x-y coordinates. height (`int`): Height of the mask. width (`int`): Width of the mask. """ try: from pycocotools import mask as coco_mask except ImportError: raise ImportError("Pycocotools is not installed in your environment.") masks = [] for polygons in segmentations: rles = coco_mask.frPyObjects(polygons, height, width) mask = coco_mask.decode(rles) if len(mask.shape) < 3: mask = mask[..., None] mask = np.asarray(mask, dtype=np.uint8) mask = np.any(mask, axis=2) masks.append(mask) if masks: masks = np.stack(masks, axis=0) else: masks = np.zeros((0, height, width), dtype=np.uint8) return masks # Copied from transformers.models.detr.image_processing_detr.prepare_coco_detection_annotation with DETR->DeformableDetr def prepare_coco_detection_annotation(image, target, return_segmentation_masks: bool = False): """ Convert the target in COCO format into the format expected by DeformableDetr. """ image_height, image_width = get_image_size(image) image_id = target["image_id"] image_id = np.asarray([image_id], dtype=np.int64) # Get all COCO annotations for the given image. annotations = target["annotations"] annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0] classes = [obj["category_id"] for obj in annotations] classes = np.asarray(classes, dtype=np.int64) # for conversion to coco api area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32) iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64) boxes = [obj["bbox"] for obj in annotations] # guard against no boxes via resizing boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4) boxes[:, 2:] += boxes[:, :2] boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width) boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height) keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) new_target = {} new_target["image_id"] = image_id new_target["class_labels"] = classes[keep] new_target["boxes"] = boxes[keep] new_target["area"] = area[keep] new_target["iscrowd"] = iscrowd[keep] new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64) if annotations and "keypoints" in annotations[0]: keypoints = [obj["keypoints"] for obj in annotations] keypoints = np.asarray(keypoints, dtype=np.float32) num_keypoints = keypoints.shape[0] keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints new_target["keypoints"] = keypoints[keep] if return_segmentation_masks: segmentation_masks = [obj["segmentation"] for obj in annotations] masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width) new_target["masks"] = masks[keep] return new_target # Copied from transformers.models.detr.image_processing_detr.masks_to_boxes def masks_to_boxes(masks: np.ndarray) -> np.ndarray: """ Compute the bounding boxes around the provided panoptic segmentation masks. 
Args: masks: masks in format `[number_masks, height, width]` where N is the number of masks Returns: boxes: bounding boxes in format `[number_masks, 4]` in xyxy format """ if masks.size == 0: return np.zeros((0, 4)) h, w = masks.shape[-2:] y = np.arange(0, h, dtype=np.float32) x = np.arange(0, w, dtype=np.float32) # see https://github.com/pytorch/pytorch/issues/50276 y, x = np.meshgrid(y, x, indexing="ij") x_mask = masks * np.expand_dims(x, axis=0) x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) x_min = x.filled(fill_value=1e8) x_min = x_min.reshape(x_min.shape[0], -1).min(-1) y_mask = masks * np.expand_dims(y, axis=0) y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) y_min = y.filled(fill_value=1e8) y_min = y_min.reshape(y_min.shape[0], -1).min(-1) return np.stack([x_min, y_min, x_max, y_max], 1) # Copied from transformers.models.detr.image_processing_detr.prepare_coco_panoptic_annotation with DETR->DeformableDetr def prepare_coco_panoptic_annotation( image: np.ndarray, target: Dict, masks_path: Union[str, pathlib.Path], return_masks: bool = True ) -> Dict: """ Prepare a coco panoptic annotation for DeformableDetr. """ image_height, image_width = get_image_size(image) annotation_path = pathlib.Path(masks_path) / target["file_name"] new_target = {} new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64) new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64) new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64) if "segments_info" in target: masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32) masks = rgb_to_id(masks) ids = np.array([segment_info["id"] for segment_info in target["segments_info"]]) masks = masks == ids[:, None, None] masks = masks.astype(np.uint8) if return_masks: new_target["masks"] = masks new_target["boxes"] = masks_to_boxes(masks) new_target["class_labels"] = np.array( [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64 ) new_target["iscrowd"] = np.asarray( [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64 ) new_target["area"] = np.asarray( [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32 ) return new_target # Copied from transformers.models.detr.image_processing_detr.get_segmentation_image def get_segmentation_image( masks: np.ndarray, input_size: Tuple, target_size: Tuple, stuff_equiv_classes, deduplicate=False ): h, w = input_size final_h, final_w = target_size m_id = scipy.special.softmax(masks.transpose(0, 1), -1) if m_id.shape[-1] == 0: # We didn't detect any mask :( m_id = np.zeros((h, w), dtype=np.int64) else: m_id = m_id.argmax(-1).reshape(h, w) if deduplicate: # Merge the masks corresponding to the same stuff class for equiv in stuff_equiv_classes.values(): for eq_id in equiv: m_id[m_id == eq_id] = equiv[0] seg_img = id_to_rgb(m_id) seg_img = resize(seg_img, (final_w, final_h), resample=PILImageResampling.NEAREST) return seg_img # Copied from transformers.models.detr.image_processing_detr.get_mask_area def get_mask_area(seg_img: np.ndarray, target_size: Tuple[int, int], n_classes: int) -> np.ndarray: final_h, final_w = target_size np_seg_img = seg_img.astype(np.uint8) np_seg_img = np_seg_img.reshape(final_h, final_w, 3) m_id = rgb_to_id(np_seg_img) area = [(m_id == i).sum() for i in range(n_classes)] 
return area # Copied from transformers.models.detr.image_processing_detr.score_labels_from_class_probabilities def score_labels_from_class_probabilities(logits: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: probs = scipy.special.softmax(logits, axis=-1) labels = probs.argmax(-1, keepdims=True) scores = np.take_along_axis(probs, labels, axis=-1) scores, labels = scores.squeeze(-1), labels.squeeze(-1) return scores, labels # Copied from transformers.models.detr.image_processing_detr.post_process_panoptic_sample def post_process_panoptic_sample( out_logits: np.ndarray, masks: np.ndarray, boxes: np.ndarray, processed_size: Tuple[int, int], target_size: Tuple[int, int], is_thing_map: Dict, threshold=0.85, ) -> Dict: """ Converts the output of [`DetrForSegmentation`] into panoptic segmentation predictions for a single sample. Args: out_logits (`torch.Tensor`): The logits for this sample. masks (`torch.Tensor`): The predicted segmentation masks for this sample. boxes (`torch.Tensor`): The prediced bounding boxes for this sample. The boxes are in the normalized format `(center_x, center_y, width, height)` and values between `[0, 1]`, relative to the size the image (disregarding padding). processed_size (`Tuple[int, int]`): The processed size of the image `(height, width)`, as returned by the preprocessing step i.e. the size after data augmentation but before batching. target_size (`Tuple[int, int]`): The target size of the image, `(height, width)` corresponding to the requested final size of the prediction. is_thing_map (`Dict`): A dictionary mapping class indices to a boolean value indicating whether the class is a thing or not. threshold (`float`, *optional*, defaults to 0.85): The threshold used to binarize the segmentation masks. """ # we filter empty queries and detection below threshold scores, labels = score_labels_from_class_probabilities(out_logits) keep = (labels != out_logits.shape[-1] - 1) & (scores > threshold) cur_scores = scores[keep] cur_classes = labels[keep] cur_boxes = center_to_corners_format(boxes[keep]) if len(cur_boxes) != len(cur_classes): raise ValueError("Not as many boxes as there are classes") cur_masks = masks[keep] cur_masks = resize(cur_masks[:, None], processed_size, resample=PILImageResampling.BILINEAR) cur_masks = safe_squeeze(cur_masks, 1) b, h, w = cur_masks.shape # It may be that we have several predicted masks for the same stuff class. 
# In the following, we track the list of masks ids for each stuff class (they are merged later on) cur_masks = cur_masks.reshape(b, -1) stuff_equiv_classes = defaultdict(list) for k, label in enumerate(cur_classes): if not is_thing_map[label]: stuff_equiv_classes[label].append(k) seg_img = get_segmentation_image(cur_masks, processed_size, target_size, stuff_equiv_classes, deduplicate=True) area = get_mask_area(cur_masks, processed_size, n_classes=len(cur_scores)) # We filter out any mask that is too small if cur_classes.size() > 0: # We know filter empty masks as long as we find some filtered_small = np.array([a <= 4 for a in area], dtype=bool) while filtered_small.any(): cur_masks = cur_masks[~filtered_small] cur_scores = cur_scores[~filtered_small] cur_classes = cur_classes[~filtered_small] seg_img = get_segmentation_image(cur_masks, (h, w), target_size, stuff_equiv_classes, deduplicate=True) area = get_mask_area(seg_img, target_size, n_classes=len(cur_scores)) filtered_small = np.array([a <= 4 for a in area], dtype=bool) else: cur_classes = np.ones((1, 1), dtype=np.int64) segments_info = [ {"id": i, "isthing": is_thing_map[cat], "category_id": int(cat), "area": a} for i, (cat, a) in enumerate(zip(cur_classes, area)) ] del cur_classes with io.BytesIO() as out: PIL.Image.fromarray(seg_img).save(out, format="PNG") predictions = {"png_string": out.getvalue(), "segments_info": segments_info} return predictions # Copied from transformers.models.detr.image_processing_detr.resize_annotation def resize_annotation( annotation: Dict[str, Any], orig_size: Tuple[int, int], target_size: Tuple[int, int], threshold: float = 0.5, resample: PILImageResampling = PILImageResampling.NEAREST, ): """ Resizes an annotation to a target size. Args: annotation (`Dict[str, Any]`): The annotation dictionary. orig_size (`Tuple[int, int]`): The original size of the input image. target_size (`Tuple[int, int]`): The target size of the image, as returned by the preprocessing `resize` step. threshold (`float`, *optional*, defaults to 0.5): The threshold used to binarize the segmentation masks. resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`): The resampling filter to use when resizing the masks. """ ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size)) ratio_height, ratio_width = ratios new_annotation = {} new_annotation["size"] = target_size for key, value in annotation.items(): if key == "boxes": boxes = value scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32) new_annotation["boxes"] = scaled_boxes elif key == "area": area = value scaled_area = area * (ratio_width * ratio_height) new_annotation["area"] = scaled_area elif key == "masks": masks = value[:, None] masks = np.array([resize(mask, target_size, resample=resample) for mask in masks]) masks = masks.astype(np.float32) masks = masks[:, 0] > threshold new_annotation["masks"] = masks elif key == "size": new_annotation["size"] = target_size else: new_annotation[key] = value return new_annotation # Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle def binary_mask_to_rle(mask): """ Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format. Args: mask (`torch.Tensor` or `numpy.array`): A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target segment_id or class_id. Returns: `List`: Run-length encoded list of the binary mask. 
Refer to COCO API for more information about the RLE format. """ if is_torch_tensor(mask): mask = mask.numpy() pixels = mask.flatten() pixels = np.concatenate([[0], pixels, [0]]) runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 runs[1::2] -= runs[::2] return list(runs) # Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle def convert_segmentation_to_rle(segmentation): """ Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format. Args: segmentation (`torch.Tensor` or `numpy.array`): A segmentation map of shape `(height, width)` where each value denotes a segment or class id. Returns: `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id. """ segment_ids = torch.unique(segmentation) run_length_encodings = [] for idx in segment_ids: mask = torch.where(segmentation == idx, 1, 0) rle = binary_mask_to_rle(mask) run_length_encodings.append(rle) return run_length_encodings # Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): """ Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and `labels`. Args: masks (`torch.Tensor`): A tensor of shape `(num_queries, height, width)`. scores (`torch.Tensor`): A tensor of shape `(num_queries)`. labels (`torch.Tensor`): A tensor of shape `(num_queries)`. object_mask_threshold (`float`): A number between 0 and 1 used to binarize the masks. Raises: `ValueError`: Raised when the first dimension doesn't match in all input tensors. Returns: `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region < `object_mask_threshold`. 
""" if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): raise ValueError("mask, scores and labels must have the same shape!") to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) return masks[to_keep], scores[to_keep], labels[to_keep] # Copied from transformers.models.detr.image_processing_detr.check_segment_validity def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8): # Get the mask associated with the k class mask_k = mask_labels == k mask_k_area = mask_k.sum() # Compute the area of all the stuff in query k original_area = (mask_probs[k] >= mask_threshold).sum() mask_exists = mask_k_area > 0 and original_area > 0 # Eliminate disconnected tiny segments if mask_exists: area_ratio = mask_k_area / original_area if not area_ratio.item() > overlap_mask_area_threshold: mask_exists = False return mask_exists, mask_k # Copied from transformers.models.detr.image_processing_detr.compute_segments def compute_segments( mask_probs, pred_scores, pred_labels, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_size: Tuple[int, int] = None, ): height = mask_probs.shape[1] if target_size is None else target_size[0] width = mask_probs.shape[2] if target_size is None else target_size[1] segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) segments: List[Dict] = [] if target_size is not None: mask_probs = nn.functional.interpolate( mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False )[0] current_segment_id = 0 # Weigh each mask by its prediction score mask_probs *= pred_scores.view(-1, 1, 1) mask_labels = mask_probs.argmax(0) # [height, width] # Keep track of instances of each class stuff_memory_list: Dict[str, int] = {} for k in range(pred_labels.shape[0]): pred_class = pred_labels[k].item() should_fuse = pred_class in label_ids_to_fuse # Check if mask exists and large enough to be a segment mask_exists, mask_k = check_segment_validity( mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold ) if mask_exists: if pred_class in stuff_memory_list: current_segment_id = stuff_memory_list[pred_class] else: current_segment_id += 1 # Add current object segment to final segmentation map segmentation[mask_k] = current_segment_id segment_score = round(pred_scores[k].item(), 6) segments.append( { "id": current_segment_id, "label_id": pred_class, "was_fused": should_fuse, "score": segment_score, } ) if should_fuse: stuff_memory_list[pred_class] = current_segment_id return segmentation, segments class DeformableDetrImageProcessor(BaseImageProcessor): r""" Constructs a Deformable DETR image processor. Args: format (`str`, *optional*, defaults to `"coco_detection"`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". do_resize (`bool`, *optional*, defaults to `True`): Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`): Size of the image's (height, width) dimensions after resizing. Can be overridden by the `size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use if resizing the image. 
do_rescale (`bool`, *optional*, defaults to `True`): Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize: Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): Mean values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method. do_pad (`bool`, *optional*, defaults to `True`): Controls whether to pad the image to the largest image in a batch and create a pixel mask. Can be overridden by the `do_pad` parameter in the `preprocess` method. """ model_input_names = ["pixel_values", "pixel_mask"] # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.__init__ def __init__( self, format: Union[str, AnnotionFormat] = AnnotionFormat.COCO_DETECTION, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Union[float, List[float]] = None, image_std: Union[float, List[float]] = None, do_pad: bool = True, **kwargs, ) -> None: if "pad_and_return_pixel_mask" in kwargs: do_pad = kwargs.pop("pad_and_return_pixel_mask") if "max_size" in kwargs: warnings.warn( "The `max_size` parameter is deprecated and will be removed in v4.26. " "Please specify in `size['longest_edge'] instead`.", FutureWarning, ) max_size = kwargs.pop("max_size") else: max_size = None if size is None else 1333 size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333} size = get_size_dict(size, max_size=max_size, default_to_square=False) super().__init__(**kwargs) self.format = format self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.do_pad = do_pad @property # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.max_size def max_size(self): warnings.warn( "The `max_size` parameter is deprecated and will be removed in v4.27. " "Please specify in `size['longest_edge'] instead`.", FutureWarning, ) return self.size["longest_edge"] @classmethod # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.from_dict with Detr->DeformableDetr def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs): """ Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is created using from_dict and kwargs e.g. 
`DeformableDetrImageProcessor.from_pretrained(checkpoint, size=600, max_size=800)` """ image_processor_dict = image_processor_dict.copy() if "max_size" in kwargs: image_processor_dict["max_size"] = kwargs.pop("max_size") if "pad_and_return_pixel_mask" in kwargs: image_processor_dict["pad_and_return_pixel_mask"] = kwargs.pop("pad_and_return_pixel_mask") return super().from_dict(image_processor_dict, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation with DETR->DeformableDetr def prepare_annotation( self, image: np.ndarray, target: Dict, format: Optional[AnnotionFormat] = None, return_segmentation_masks: bool = None, masks_path: Optional[Union[str, pathlib.Path]] = None, ) -> Dict: """ Prepare an annotation for feeding into DeformableDetr model. """ format = format if format is not None else self.format if format == AnnotionFormat.COCO_DETECTION: return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_detection_annotation(image, target, return_segmentation_masks) elif format == AnnotionFormat.COCO_PANOPTIC: return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_panoptic_annotation( image, target, masks_path=masks_path, return_masks=return_segmentation_masks ) else: raise ValueError(f"Format {format} is not supported.") return target # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare def prepare(self, image, target, return_segmentation_masks=None, masks_path=None): warnings.warn( "The `prepare` method is deprecated and will be removed in a future version. " "Please use `prepare_annotation` instead. Note: the `prepare_annotation` method " "does not return the image anymore.", ) target = self.prepare_annotation(image, target, return_segmentation_masks, masks_path, self.format) return image, target # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.convert_coco_poly_to_mask def convert_coco_poly_to_mask(self, *args, **kwargs): warnings.warn("The `convert_coco_poly_to_mask` method is deprecated and will be removed in a future version. ") return convert_coco_poly_to_mask(*args, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_detection def prepare_coco_detection(self, *args, **kwargs): warnings.warn("The `prepare_coco_detection` method is deprecated and will be removed in a future version. ") return prepare_coco_detection_annotation(*args, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_panoptic def prepare_coco_panoptic(self, *args, **kwargs): warnings.warn("The `prepare_coco_panoptic` method is deprecated and will be removed in a future version. ") return prepare_coco_panoptic_annotation(*args, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension] = None, **kwargs, ) -> np.ndarray: """ Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an int, smaller edge of the image will be matched to this number. """ if "max_size" in kwargs: warnings.warn( "The `max_size` parameter is deprecated and will be removed in v4.26. 
" "Please specify in `size['longest_edge'] instead`.", FutureWarning, ) max_size = kwargs.pop("max_size") else: max_size = None size = get_size_dict(size, max_size=max_size, default_to_square=False) if "shortest_edge" in size and "longest_edge" in size: size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: size = (size["height"], size["width"]) else: raise ValueError( "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got" f" {size.keys()}." ) image = resize(image, size=size, resample=resample, data_format=data_format) return image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation def resize_annotation( self, annotation, orig_size, size, resample: PILImageResampling = PILImageResampling.NEAREST, ) -> Dict: """ Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched to this number. """ return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale def rescale( self, image: np.ndarray, rescale_factor: Union[float, int], data_format: Optional[ChannelDimension] = None ) -> np.ndarray: """ Rescale the image by the given factor. """ return rescale(image, rescale_factor, data_format=data_format) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize def normalize( self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Normalize the image with the given mean and standard deviation. """ return normalize(image, mean=mean, std=std, data_format=data_format) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict: """ Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to `[center_x, center_y, width, height]` format. """ return normalize_annotation(annotation, image_size=image_size) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad_and_create_pixel_mask def pad_and_create_pixel_mask( self, pixel_values_list: List[ImageInput], return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, ) -> BatchFeature: """ Pads a batch of images with zeros to the size of largest height and width in the batch and returns their corresponding pixel mask. Args: images (`List[np.ndarray]`): Batch of images to pad. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ warnings.warn( "This method is deprecated and will be removed in v4.27.0. 
Please use pad instead.", FutureWarning ) # pad expects a list of np.ndarray, but the previous feature extractors expected torch tensors images = [to_numpy_array(image) for image in pixel_values_list] return self.pad( images=images, return_pixel_mask=True, return_tensors=return_tensors, data_format=data_format, ) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image def _pad_image( self, image: np.ndarray, output_size: Tuple[int, int], constant_values: Union[float, Iterable[float]] = 0, data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Pad an image with zeros to the given size. """ input_height, input_width = get_image_size(image) output_height, output_width = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad( image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format ) return padded_image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad def pad( self, images: List[np.ndarray], constant_values: Union[float, Iterable[float]] = 0, return_pixel_mask: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width in the batch and optionally returns their corresponding pixel mask. Args: image (`np.ndarray`): Image to pad. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether to return a pixel mask. input_channel_dimension (`ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be inferred from the input image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ pad_size = get_max_height_width(images) padded_images = [ self._pad_image(image, pad_size, constant_values=constant_values, data_format=data_format) for image in images ] data = {"pixel_values": padded_images} if return_pixel_mask: masks = [make_pixel_mask(image=image, output_size=pad_size) for image in images] data["pixel_mask"] = masks return BatchFeature(data=data, tensor_type=return_tensors) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.preprocess def preprocess( self, images: ImageInput, annotations: Optional[Union[AnnotationType, List[AnnotationType]]] = None, return_segmentation_masks: bool = None, masks_path: Optional[Union[str, pathlib.Path]] = None, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample=None, # PILImageResampling do_rescale: Optional[bool] = None, rescale_factor: Optional[Union[int, float]] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: Optional[bool] = None, format: Optional[Union[str, AnnotionFormat]] = None, return_tensors: Optional[Union[TensorType, str]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs, ) -> BatchFeature: """ Preprocess an image or a batch of images so that it can be used by the model. Args: images (`ImageInput`): Image or batch of images to preprocess. 
annotations (`AnnotationType` or `List[AnnotationType]`, *optional*): List of annotations associated with the image or batch of images. If annotionation is for object detection, the annotations should be a dictionary with the following keys: - "image_id" (`int`): The image id. - "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a dictionary. An image can have no annotations, in which case the list should be empty. If annotionation is for segmentation, the annotations should be a dictionary with the following keys: - "image_id" (`int`): The image id. - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary. An image can have no segments, in which case the list should be empty. - "file_name" (`str`): The file name of the image. return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks): Whether to return segmentation masks. masks_path (`str` or `pathlib.Path`, *optional*): Path to the directory containing the segmentation masks. do_resize (`bool`, *optional*, defaults to self.do_resize): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to self.size): Size of the image after resizing. resample (`PILImageResampling`, *optional*, defaults to self.resample): Resampling filter to use when resizing the image. do_rescale (`bool`, *optional*, defaults to self.do_rescale): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to self.rescale_factor): Rescale factor to use when rescaling the image. do_normalize (`bool`, *optional*, defaults to self.do_normalize): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean): Mean to use when normalizing the image. image_std (`float` or `List[float]`, *optional*, defaults to self.image_std): Standard deviation to use when normalizing the image. do_pad (`bool`, *optional*, defaults to self.do_pad): Whether to pad the image. format (`str` or `AnnotionFormat`, *optional*, defaults to self.format): Format of the annotations. return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors): Type of tensors to return. If `None`, will return the list of images. data_format (`str` or `ChannelDimension`, *optional*, defaults to self.data_format): The channel dimension format of the image. If not provided, it will be the same as the input image. 
""" if "pad_and_return_pixel_mask" in kwargs: warnings.warn( "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, " "use `do_pad` instead.", FutureWarning, ) do_pad = kwargs.pop("pad_and_return_pixel_mask") max_size = None if "max_size" in kwargs: warnings.warn( "The `max_size` argument is deprecated and will be removed in a future version, use" " `size['longest_edge']` instead.", FutureWarning, ) size = kwargs.pop("max_size") do_resize = self.do_resize if do_resize is None else do_resize size = self.size if size is None else size size = get_size_dict(size=size, max_size=max_size, default_to_square=False) resample = self.resample if resample is None else resample do_rescale = self.do_rescale if do_rescale is None else do_rescale rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor do_normalize = self.do_normalize if do_normalize is None else do_normalize image_mean = self.image_mean if image_mean is None else image_mean image_std = self.image_std if image_std is None else image_std do_pad = self.do_pad if do_pad is None else do_pad format = self.format if format is None else format if do_resize is not None and size is None: raise ValueError("Size and max_size must be specified if do_resize is True.") if do_rescale is not None and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize is not None and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") images = make_list_of_images(images) if annotations is not None and isinstance(annotations, dict): annotations = [annotations] if annotations is not None and len(images) != len(annotations): raise ValueError( f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match." ) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) format = AnnotionFormat(format) if annotations is not None: if format == AnnotionFormat.COCO_DETECTION and not valid_coco_detection_annotations(annotations): raise ValueError( "Invalid COCO detection annotations. Annotations must a dict (single image) of list of dicts" "(batch of images) with the following keys: `image_id` and `annotations`, with the latter " "being a list of annotations in the COCO format." ) elif format == AnnotionFormat.COCO_PANOPTIC and not valid_coco_panoptic_annotations(annotations): raise ValueError( "Invalid COCO panoptic annotations. Annotations must a dict (single image) of list of dicts " "(batch of images) with the following keys: `image_id`, `file_name` and `segments_info`, with " "the latter being a list of annotations in the COCO format." ) elif format not in SUPPORTED_ANNOTATION_FORMATS: raise ValueError( f"Unsupported annotation format: {format} must be one of {SUPPORTED_ANNOTATION_FORMATS}" ) if ( masks_path is not None and format == AnnotionFormat.COCO_PANOPTIC and not isinstance(masks_path, (pathlib.Path, str)) ): raise ValueError( "The path to the directory containing the mask PNG files should be provided as a" f" `pathlib.Path` or string object, but is {type(masks_path)} instead." 
) # All transformations expect numpy arrays images = [to_numpy_array(image) for image in images] # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image) if annotations is not None: prepared_images = [] prepared_annotations = [] for image, target in zip(images, annotations): target = self.prepare_annotation( image, target, format, return_segmentation_masks=return_segmentation_masks, masks_path=masks_path ) prepared_images.append(image) prepared_annotations.append(target) images = prepared_images annotations = prepared_annotations del prepared_images, prepared_annotations # transformations if do_resize: if annotations is not None: resized_images, resized_annotations = [], [] for image, target in zip(images, annotations): orig_size = get_image_size(image) resized_image = self.resize(image, size=size, max_size=max_size, resample=resample) resized_annotation = self.resize_annotation(target, orig_size, get_image_size(resized_image)) resized_images.append(resized_image) resized_annotations.append(resized_annotation) images = resized_images annotations = resized_annotations del resized_images, resized_annotations else: images = [self.resize(image, size=size, resample=resample) for image in images] if do_rescale: images = [self.rescale(image, rescale_factor) for image in images] if do_normalize: images = [self.normalize(image, image_mean, image_std) for image in images] if annotations is not None: annotations = [ self.normalize_annotation(annotation, get_image_size(image)) for annotation, image in zip(annotations, images) ] if do_pad: # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...} data = self.pad(images, return_pixel_mask=True, data_format=data_format) else: images = [to_channel_dimension_format(image, data_format) for image in images] data = {"pixel_values": images} encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) if annotations is not None: encoded_inputs["labels"] = [ BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations ] return encoded_inputs # POSTPROCESSING METHODS - TODO: add support for other frameworks def post_process(self, outputs, target_sizes): """ Converts the raw output of [`DeformableDetrForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch. Args: outputs ([`DeformableDetrObjectDetectionOutput`]): Raw outputs of the model. target_sizes (`torch.Tensor` of shape `(batch_size, 2)`): Tensor containing the size (height, width) of each image of the batch. For evaluation, this must be the original image size (before any data augmentation). For visualization, this should be the image size after data augment, but before padding. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. 
""" warnings.warn( "`post_process` is deprecated and will be removed in v5 of Transformers, please use" " `post_process_object_detection`.", FutureWarning, ) out_logits, out_bbox = outputs.logits, outputs.pred_boxes if len(out_logits) != len(target_sizes): raise ValueError("Make sure that you pass in as many target sizes as the batch dimension of the logits") if target_sizes.shape[1] != 2: raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch") prob = out_logits.sigmoid() topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1) scores = topk_values topk_boxes = torch_int_div(topk_indexes, out_logits.shape[2]) labels = topk_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) # and from relative [0, 1] to absolute [0, height] coordinates img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1) boxes = boxes * scale_fct[:, None, :] results = [{"scores": s, "labels": l, "boxes": b} for s, l, b in zip(scores, labels, boxes)] return results def post_process_object_detection( self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None ): """ Converts the raw output of [`DeformableDetrForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch. Args: outputs ([`DetrObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. """ out_logits, out_bbox = outputs.logits, outputs.pred_boxes if target_sizes is not None: if len(out_logits) != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) prob = out_logits.sigmoid() topk_values, topk_indexes = torch.topk(prob.view(out_logits.shape[0], -1), 100, dim=1) scores = topk_values topk_boxes = torch_int_div(topk_indexes, out_logits.shape[2]) labels = topk_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4)) # and from relative [0, 1] to absolute [0, height] coordinates if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] for s, l, b in zip(scores, labels, boxes): score = s[s > threshold] label = l[s > threshold] box = b[s > threshold] results.append({"scores": score, "labels": label, "boxes": box}) return results
1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/deformable_detr/modeling_deformable_detr.py
# coding=utf-8 # Copyright 2022 SenseTime and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Deformable DETR model.""" import copy import math import warnings from dataclasses import dataclass from typing import Dict, List, Optional, Tuple import torch import torch.nn.functional as F from torch import Tensor, nn from torch.autograd import Function from torch.autograd.function import once_differentiable from ...activations import ACT2FN from ...file_utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, is_scipy_available, is_timm_available, is_torch_cuda_available, is_vision_available, replace_return_docstrings, requires_backends, ) from ...modeling_outputs import BaseModelOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import meshgrid from ...utils import is_ninja_available, logging from ..auto import AutoBackbone from .configuration_deformable_detr import DeformableDetrConfig from .load_custom import load_cuda_kernels logger = logging.get_logger(__name__) # Move this to not compile only when importing, this needs to happen later, like in __init__. if is_torch_cuda_available() and is_ninja_available(): logger.info("Loading custom CUDA kernels...") try: MultiScaleDeformableAttention = load_cuda_kernels() except Exception as e: logger.warning(f"Could not load the custom kernel for multi-scale deformable attention: {e}") MultiScaleDeformableAttention = None else: MultiScaleDeformableAttention = None if is_vision_available(): from transformers.image_transforms import center_to_corners_format class MultiScaleDeformableAttentionFunction(Function): @staticmethod def forward( context, value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, im2col_step, ): context.im2col_step = im2col_step output = MultiScaleDeformableAttention.ms_deform_attn_forward( value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, context.im2col_step, ) context.save_for_backward( value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights ) return output @staticmethod @once_differentiable def backward(context, grad_output): ( value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, ) = context.saved_tensors grad_value, grad_sampling_loc, grad_attn_weight = MultiScaleDeformableAttention.ms_deform_attn_backward( value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, grad_output, context.im2col_step, ) return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None if is_scipy_available(): from scipy.optimize import linear_sum_assignment if is_timm_available(): from timm import create_model logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "DeformableDetrConfig" _CHECKPOINT_FOR_DOC = "sensetime/deformable-detr" DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = [ "sensetime/deformable-detr", # See all Deformable DETR models at 
https://huggingface.co/models?filter=deformable-detr ] @dataclass class DeformableDetrDecoderOutput(ModelOutput): """ Base class for outputs of the DeformableDetrDecoder. This class adds two attributes to BaseModelOutputWithCrossAttentions, namely: - a stacked tensor of intermediate decoder hidden states (i.e. the output of each decoder layer) - a stacked tensor of intermediate reference points. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`): Stacked intermediate hidden states (output of each layer of the decoder). intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, sequence_length, hidden_size)`): Stacked intermediate reference points (reference points of each layer of the decoder). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. """ last_hidden_state: torch.FloatTensor = None intermediate_hidden_states: torch.FloatTensor = None intermediate_reference_points: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class DeformableDetrModelOutput(ModelOutput): """ Base class for outputs of the Deformable DETR encoder-decoder model. Args: init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Initial reference points sent through the Transformer decoder. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`): Stacked intermediate hidden states (output of each layer of the decoder). intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`): Stacked intermediate reference points (reference points of each layer of the decoder). 
decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, num_queries, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, num_queries, num_queries)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are picked as region proposals in the first stage. Output of bounding box binary classification (i.e. foreground and background). enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): Logits of predicted bounding boxes coordinates in the first stage. 
""" init_reference_points: torch.FloatTensor = None last_hidden_state: torch.FloatTensor = None intermediate_hidden_states: torch.FloatTensor = None intermediate_reference_points: torch.FloatTensor = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None enc_outputs_class: Optional[torch.FloatTensor] = None enc_outputs_coord_logits: Optional[torch.FloatTensor] = None @dataclass class DeformableDetrObjectDetectionOutput(ModelOutput): """ Output type of [`DeformableDetrForObjectDetection`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)): Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss. loss_dict (`Dict`, *optional*): A dictionary containing the individual losses. Useful for logging. logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): Classification logits (including no-object) for all queries. pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). You can use [`~DeformableDetrProcessor.post_process_object_detection`] to retrieve the unnormalized bounding boxes. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxilary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and `pred_boxes`) for each decoder layer. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the decoder of the model. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, num_queries, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, num_queries, num_queries)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. 
encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_heads, 4, 4)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`): Stacked intermediate hidden states (output of each layer of the decoder). intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`): Stacked intermediate reference points (reference points of each layer of the decoder). init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Initial reference points sent through the Transformer decoder. enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are picked as region proposals in the first stage. Output of bounding box binary classification (i.e. foreground and background). enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): Logits of predicted bounding boxes coordinates in the first stage. 
""" loss: Optional[torch.FloatTensor] = None loss_dict: Optional[Dict] = None logits: torch.FloatTensor = None pred_boxes: torch.FloatTensor = None auxiliary_outputs: Optional[List[Dict]] = None init_reference_points: Optional[torch.FloatTensor] = None last_hidden_state: Optional[torch.FloatTensor] = None intermediate_hidden_states: Optional[torch.FloatTensor] = None intermediate_reference_points: Optional[torch.FloatTensor] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None enc_outputs_class: Optional = None enc_outputs_coord_logits: Optional = None def _get_clones(module, N): return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) def inverse_sigmoid(x, eps=1e-5): x = x.clamp(min=0, max=1) x1 = x.clamp(min=eps) x2 = (1 - x).clamp(min=eps) return torch.log(x1 / x2) # Copied from transformers.models.detr.modeling_detr.DetrFrozenBatchNorm2d with Detr->DeformableDetr class DeformableDetrFrozenBatchNorm2d(nn.Module): """ BatchNorm2d where the batch statistics and the affine parameters are fixed. Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than torchvision.models.resnet[18,34,50,101] produce nans. """ def __init__(self, n): super().__init__() self.register_buffer("weight", torch.ones(n)) self.register_buffer("bias", torch.zeros(n)) self.register_buffer("running_mean", torch.zeros(n)) self.register_buffer("running_var", torch.ones(n)) def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ): num_batches_tracked_key = prefix + "num_batches_tracked" if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] super()._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ) def forward(self, x): # move reshapes to the beginning # to make it user-friendly weight = self.weight.reshape(1, -1, 1, 1) bias = self.bias.reshape(1, -1, 1, 1) running_var = self.running_var.reshape(1, -1, 1, 1) running_mean = self.running_mean.reshape(1, -1, 1, 1) epsilon = 1e-5 scale = weight * (running_var + epsilon).rsqrt() bias = bias - running_mean * scale return x * scale + bias # Copied from transformers.models.detr.modeling_detr.replace_batch_norm with Detr->DeformableDetr def replace_batch_norm(m, name=""): for attr_str in dir(m): target_attr = getattr(m, attr_str) if isinstance(target_attr, nn.BatchNorm2d): frozen = DeformableDetrFrozenBatchNorm2d(target_attr.num_features) bn = getattr(m, attr_str) frozen.weight.data.copy_(bn.weight) frozen.bias.data.copy_(bn.bias) frozen.running_mean.data.copy_(bn.running_mean) frozen.running_var.data.copy_(bn.running_var) setattr(m, attr_str, frozen) for n, ch in m.named_children(): replace_batch_norm(ch, n) class DeformableDetrConvEncoder(nn.Module): """ Convolutional backbone, using either the AutoBackbone API or one from the timm library. nn.BatchNorm2d layers are replaced by DeformableDetrFrozenBatchNorm2d as defined above. 
""" def __init__(self, config): super().__init__() self.config = config if config.use_timm_backbone: requires_backends(self, ["timm"]) kwargs = {} if config.dilation: kwargs["output_stride"] = 16 backbone = create_model( config.backbone, pretrained=config.use_pretrained_backbone, features_only=True, out_indices=(2, 3, 4) if config.num_feature_levels > 1 else (4,), in_chans=config.num_channels, **kwargs, ) else: backbone = AutoBackbone.from_config(config.backbone_config) # replace batch norm by frozen batch norm with torch.no_grad(): replace_batch_norm(backbone) self.model = backbone self.intermediate_channel_sizes = ( self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels ) backbone_model_type = config.backbone if config.use_timm_backbone else config.backbone_config.model_type if "resnet" in backbone_model_type: for name, parameter in self.model.named_parameters(): if config.use_timm_backbone: if "layer2" not in name and "layer3" not in name and "layer4" not in name: parameter.requires_grad_(False) else: if "stage.1" not in name and "stage.2" not in name and "stage.3" not in name: parameter.requires_grad_(False) # Copied from transformers.models.detr.modeling_detr.DetrConvEncoder.forward with Detr->DeformableDetr def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): # send pixel_values through the model to get list of feature maps features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps out = [] for feature_map in features: # downsample pixel_mask to match shape of corresponding feature_map mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0] out.append((feature_map, mask)) return out # Copied from transformers.models.detr.modeling_detr.DetrConvModel with Detr->DeformableDetr class DeformableDetrConvModel(nn.Module): """ This module adds 2D position embeddings to all intermediate feature maps of the convolutional encoder. """ def __init__(self, conv_encoder, position_embedding): super().__init__() self.conv_encoder = conv_encoder self.position_embedding = position_embedding def forward(self, pixel_values, pixel_mask): # send pixel_values and pixel_mask through backbone to get list of (feature_map, pixel_mask) tuples out = self.conv_encoder(pixel_values, pixel_mask) pos = [] for feature_map, mask in out: # position encoding pos.append(self.position_embedding(feature_map, mask).to(feature_map.dtype)) return out, pos # Copied from transformers.models.detr.modeling_detr._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, target_len: Optional[int] = None): """ Expands attention_mask from `[batch_size, seq_len]` to `[batch_size, 1, target_seq_len, source_seq_len]`. """ batch_size, source_len = mask.size() target_len = target_len if target_len is not None else source_len expanded_mask = mask[:, None, None, :].expand(batch_size, 1, target_len, source_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min) class DeformableDetrSinePositionEmbedding(nn.Module): """ This is a more standard version of the position embedding, very similar to the one used by the Attention is all you need paper, generalized to work on images. 
""" def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None): super().__init__() self.embedding_dim = embedding_dim self.temperature = temperature self.normalize = normalize if scale is not None and normalize is False: raise ValueError("normalize should be True if scale is passed") if scale is None: scale = 2 * math.pi self.scale = scale def forward(self, pixel_values, pixel_mask): if pixel_mask is None: raise ValueError("No pixel mask provided") y_embed = pixel_mask.cumsum(1, dtype=torch.float32) x_embed = pixel_mask.cumsum(2, dtype=torch.float32) if self.normalize: eps = 1e-6 y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale dim_t = torch.arange(self.embedding_dim, dtype=torch.float32, device=pixel_values.device) dim_t = self.temperature ** (2 * (dim_t // 2) / self.embedding_dim) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) return pos # Copied from transformers.models.detr.modeling_detr.DetrLearnedPositionEmbedding class DeformableDetrLearnedPositionEmbedding(nn.Module): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, embedding_dim=256): super().__init__() self.row_embeddings = nn.Embedding(50, embedding_dim) self.column_embeddings = nn.Embedding(50, embedding_dim) def forward(self, pixel_values, pixel_mask=None): height, width = pixel_values.shape[-2:] width_values = torch.arange(width, device=pixel_values.device) height_values = torch.arange(height, device=pixel_values.device) x_emb = self.column_embeddings(width_values) y_emb = self.row_embeddings(height_values) pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1) pos = pos.permute(2, 0, 1) pos = pos.unsqueeze(0) pos = pos.repeat(pixel_values.shape[0], 1, 1, 1) return pos # Copied from transformers.models.detr.modeling_detr.build_position_encoding with Detr->DeformableDetr def build_position_encoding(config): n_steps = config.d_model // 2 if config.position_embedding_type == "sine": # TODO find a better way of exposing other arguments position_embedding = DeformableDetrSinePositionEmbedding(n_steps, normalize=True) elif config.position_embedding_type == "learned": position_embedding = DeformableDetrLearnedPositionEmbedding(n_steps) else: raise ValueError(f"Not supported {config.position_embedding_type}") return position_embedding def multi_scale_deformable_attention( value: Tensor, value_spatial_shapes: Tensor, sampling_locations: Tensor, attention_weights: Tensor ) -> Tensor: batch_size, _, num_heads, hidden_dim = value.shape _, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape value_list = value.split([height * width for height, width in value_spatial_shapes], dim=1) sampling_grids = 2 * sampling_locations - 1 sampling_value_list = [] for level_id, (height, width) in enumerate(value_spatial_shapes): # batch_size, height*width, num_heads, hidden_dim # -> batch_size, height*width, num_heads*hidden_dim # -> batch_size, num_heads*hidden_dim, height*width # -> batch_size*num_heads, hidden_dim, height, width value_l_ = ( value_list[level_id].flatten(2).transpose(1, 2).reshape(batch_size * num_heads, hidden_dim, height, 
width) ) # batch_size, num_queries, num_heads, num_points, 2 # -> batch_size, num_heads, num_queries, num_points, 2 # -> batch_size*num_heads, num_queries, num_points, 2 sampling_grid_l_ = sampling_grids[:, :, :, level_id].transpose(1, 2).flatten(0, 1) # batch_size*num_heads, hidden_dim, num_queries, num_points sampling_value_l_ = nn.functional.grid_sample( value_l_, sampling_grid_l_, mode="bilinear", padding_mode="zeros", align_corners=False ) sampling_value_list.append(sampling_value_l_) # (batch_size, num_queries, num_heads, num_levels, num_points) # -> (batch_size, num_heads, num_queries, num_levels, num_points) # -> (batch_size, num_heads, 1, num_queries, num_levels*num_points) attention_weights = attention_weights.transpose(1, 2).reshape( batch_size * num_heads, 1, num_queries, num_levels * num_points ) output = ( (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights) .sum(-1) .view(batch_size, num_heads * hidden_dim, num_queries) ) return output.transpose(1, 2).contiguous() class DeformableDetrMultiscaleDeformableAttention(nn.Module): """ Multiscale deformable attention as proposed in Deformable DETR. """ def __init__(self, embed_dim: int, num_heads: int, n_levels: int, n_points: int): super().__init__() if embed_dim % num_heads != 0: raise ValueError( f"embed_dim (d_model) must be divisible by num_heads, but got {embed_dim} and {num_heads}" ) dim_per_head = embed_dim // num_heads # check if dim_per_head is power of 2 if not ((dim_per_head & (dim_per_head - 1) == 0) and dim_per_head != 0): warnings.warn( "You'd better set embed_dim (d_model) in DeformableDetrMultiscaleDeformableAttention to make the" " dimension of each attention head a power of 2 which is more efficient in the authors' CUDA" " implementation." ) self.im2col_step = 64 self.d_model = embed_dim self.n_levels = n_levels self.n_heads = num_heads self.n_points = n_points self.sampling_offsets = nn.Linear(embed_dim, num_heads * n_levels * n_points * 2) self.attention_weights = nn.Linear(embed_dim, num_heads * n_levels * n_points) self.value_proj = nn.Linear(embed_dim, embed_dim) self.output_proj = nn.Linear(embed_dim, embed_dim) self._reset_parameters() def _reset_parameters(self): nn.init.constant_(self.sampling_offsets.weight.data, 0.0) thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads) grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) grid_init = ( (grid_init / grid_init.abs().max(-1, keepdim=True)[0]) .view(self.n_heads, 1, 1, 2) .repeat(1, self.n_levels, self.n_points, 1) ) for i in range(self.n_points): grid_init[:, :, i, :] *= i + 1 with torch.no_grad(): self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1)) nn.init.constant_(self.attention_weights.weight.data, 0.0) nn.init.constant_(self.attention_weights.bias.data, 0.0) nn.init.xavier_uniform_(self.value_proj.weight.data) nn.init.constant_(self.value_proj.bias.data, 0.0) nn.init.xavier_uniform_(self.output_proj.weight.data) nn.init.constant_(self.output_proj.bias.data, 0.0) def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): return tensor if position_embeddings is None else tensor + position_embeddings def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings: Optional[torch.Tensor] = None, reference_points=None, spatial_shapes=None, level_start_index=None, output_attentions: bool = False, ): # add position embeddings to the hidden states 
before projecting to queries and keys if position_embeddings is not None: hidden_states = self.with_pos_embed(hidden_states, position_embeddings) batch_size, num_queries, _ = hidden_states.shape batch_size, sequence_length, _ = encoder_hidden_states.shape if (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() != sequence_length: raise ValueError( "Make sure to align the spatial shapes with the sequence length of the encoder hidden states" ) value = self.value_proj(encoder_hidden_states) if attention_mask is not None: # we invert the attention_mask value = value.masked_fill(~attention_mask[..., None], float(0)) value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads) sampling_offsets = self.sampling_offsets(hidden_states).view( batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2 ) attention_weights = self.attention_weights(hidden_states).view( batch_size, num_queries, self.n_heads, self.n_levels * self.n_points ) attention_weights = F.softmax(attention_weights, -1).view( batch_size, num_queries, self.n_heads, self.n_levels, self.n_points ) # batch_size, num_queries, n_heads, n_levels, n_points, 2 if reference_points.shape[-1] == 2: offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1) sampling_locations = ( reference_points[:, :, None, :, None, :] + sampling_offsets / offset_normalizer[None, None, None, :, None, :] ) elif reference_points.shape[-1] == 4: sampling_locations = ( reference_points[:, :, None, :, None, :2] + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5 ) else: raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}") try: # custom kernel output = MultiScaleDeformableAttentionFunction.apply( value, spatial_shapes, level_start_index, sampling_locations, attention_weights, self.im2col_step, ) except Exception: # PyTorch implementation output = multi_scale_deformable_attention(value, spatial_shapes, sampling_locations, attention_weights) output = self.output_proj(output) return output, attention_weights class DeformableDetrMultiheadAttention(nn.Module): """ Multi-headed attention from 'Attention Is All You Need' paper. Here, we add position embeddings to the queries and keys (as explained in the Deformable DETR paper). """ def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): return tensor if position_embeddings is None else tensor + position_embeddings def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_embeddings: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" batch_size, target_len, embed_dim = hidden_states.size() # add position embeddings to the hidden states before projecting to queries and keys if position_embeddings is not None: hidden_states_original = hidden_states hidden_states = self.with_pos_embed(hidden_states, position_embeddings) # get queries, keys and values query_states = self.q_proj(hidden_states) * self.scaling key_states = self._shape(self.k_proj(hidden_states), -1, batch_size) value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size) proj_shape = (batch_size * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) source_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len): raise ValueError( f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is" f" {attn_weights.size()}" ) # expand attention_mask if attention_mask is not None: # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] attention_mask = _expand_mask(attention_mask, hidden_states.dtype) if attention_mask is not None: if attention_mask.size() != (batch_size, 1, target_len, source_len): raise ValueError( f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is" f" {attention_mask.size()}" ) attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. 
# In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(batch_size, target_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped class DeformableDetrEncoderLayer(nn.Module): def __init__(self, config: DeformableDetrConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = DeformableDetrMultiscaleDeformableAttention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, n_levels=config.num_feature_levels, n_points=config.encoder_n_points, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: torch.Tensor = None, reference_points=None, spatial_shapes=None, level_start_index=None, output_attentions: bool = False, ): """ Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Input to the layer. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Attention mask. position_embeddings (`torch.FloatTensor`, *optional*): Position embeddings, to be added to `hidden_states`. reference_points (`torch.FloatTensor`, *optional*): Reference points. spatial_shapes (`torch.LongTensor`, *optional*): Spatial shapes of the backbone feature maps. level_start_index (`torch.LongTensor`, *optional*): Level start index. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states # Apply Multi-scale Deformable Attention Module on the multi-scale feature maps. 
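        # In the encoder, the flattened multi-scale feature maps serve as both the queries and the values:
        # each query only attends to a small, learned set of sampling points around its reference point
        # across all feature levels instead of the full sequence.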
hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if self.training: if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class DeformableDetrDecoderLayer(nn.Module): def __init__(self, config: DeformableDetrConfig): super().__init__() self.embed_dim = config.d_model # self-attention self.self_attn = DeformableDetrMultiheadAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) # cross-attention self.encoder_attn = DeformableDetrMultiscaleDeformableAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, n_levels=config.num_feature_levels, n_points=config.decoder_n_points, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) # feedforward neural networks self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, position_embeddings: Optional[torch.Tensor] = None, reference_points=None, spatial_shapes=None, level_start_index=None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ): """ Args: hidden_states (`torch.FloatTensor`): Input to the layer of shape `(seq_len, batch, embed_dim)`. position_embeddings (`torch.FloatTensor`, *optional*): Position embeddings that are added to the queries and keys in the self-attention layer. reference_points (`torch.FloatTensor`, *optional*): Reference points. spatial_shapes (`torch.LongTensor`, *optional*): Spatial shapes. level_start_index (`torch.LongTensor`, *optional*): Level start index. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, position_embeddings=position_embeddings, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) second_residual = hidden_states # Cross-Attention cross_attn_weights = None hidden_states, cross_attn_weights = self.encoder_attn( hidden_states=hidden_states, attention_mask=encoder_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = second_residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs # Copied from transformers.models.detr.modeling_detr.DetrClassificationHead class DeformableDetrClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, hidden_states: torch.Tensor): hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states class DeformableDetrPreTrainedModel(PreTrainedModel): config_class = DeformableDetrConfig base_model_prefix = "model" main_input_name = "pixel_values" def _init_weights(self, module): std = self.config.init_std if isinstance(module, DeformableDetrLearnedPositionEmbedding): nn.init.uniform_(module.row_embeddings.weight) nn.init.uniform_(module.column_embeddings.weight) elif isinstance(module, DeformableDetrMultiscaleDeformableAttention): module._reset_parameters() elif isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() if hasattr(module, "reference_points") and not self.config.two_stage: nn.init.xavier_uniform_(module.reference_points.weight.data, gain=1.0) nn.init.constant_(module.reference_points.bias.data, 0.0) if hasattr(module, "level_embed"): nn.init.normal_(module.level_embed) def 
_set_gradient_checkpointing(self, module, value=False): if isinstance(module, DeformableDetrDecoder): module.gradient_checkpointing = value DEFORMABLE_DETR_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`DeformableDetrConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ DEFORMABLE_DETR_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`DeformableDetrImageProcessor.__call__`] for details. pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, num_queries)`, *optional*): Not used by default. Can be used to mask object queries. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you can choose to directly pass a flattened representation of an image. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an embedded representation. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ class DeformableDetrEncoder(DeformableDetrPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* deformable attention layers. Each layer is a [`DeformableDetrEncoderLayer`]. The encoder updates the flattened multi-scale feature maps through multiple deformable attention layers. 
Args: config: DeformableDetrConfig """ def __init__(self, config: DeformableDetrConfig): super().__init__(config) self.dropout = config.dropout self.layers = nn.ModuleList([DeformableDetrEncoderLayer(config) for _ in range(config.encoder_layers)]) # Initialize weights and apply final processing self.post_init() @staticmethod def get_reference_points(spatial_shapes, valid_ratios, device): """ Get reference points for each feature map. Used in decoder. Args: spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`): Spatial shapes of each feature map. valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`): Valid ratios of each feature map. device (`torch.device`): Device on which to create the tensors. Returns: `torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)` """ reference_points_list = [] for level, (height, width) in enumerate(spatial_shapes): ref_y, ref_x = meshgrid( torch.linspace(0.5, height - 0.5, height, dtype=torch.float32, device=device), torch.linspace(0.5, width - 0.5, width, dtype=torch.float32, device=device), indexing="ij", ) # TODO: valid_ratios could be useless here. check https://github.com/fundamentalvision/Deformable-DETR/issues/36 ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, level, 1] * height) ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, level, 0] * width) ref = torch.stack((ref_x, ref_y), -1) reference_points_list.append(ref) reference_points = torch.cat(reference_points_list, 1) reference_points = reference_points[:, :, None] * valid_ratios[:, None] return reference_points def forward( self, inputs_embeds=None, attention_mask=None, position_embeddings=None, spatial_shapes=None, level_start_index=None, valid_ratios=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Flattened feature map (output of the backbone + projection layer) that is passed to the encoder. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`: - 1 for pixel features that are real (i.e. **not masked**), - 0 for pixel features that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Position embeddings that are added to the queries and keys in each self-attention layer. spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`): Spatial shapes of each feature map. level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`): Starting index of each feature map. valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`): Ratio of valid area in each feature level. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = inputs_embeds hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=inputs_embeds.device) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer( hidden_states, attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class DeformableDetrDecoder(DeformableDetrPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DeformableDetrDecoderLayer`]. The decoder updates the query embeddings through multiple self-attention and cross-attention layers. Some tweaks for Deformable DETR: - `position_embeddings`, `reference_points`, `spatial_shapes` and `valid_ratios` are added to the forward pass. - it also returns a stack of intermediate outputs and reference points from all decoding layers. Args: config: DeformableDetrConfig """ def __init__(self, config: DeformableDetrConfig): super().__init__(config) self.dropout = config.dropout self.layers = nn.ModuleList([DeformableDetrDecoderLayer(config) for _ in range(config.decoder_layers)]) self.gradient_checkpointing = False # hack implementation for iterative bounding box refinement and two-stage Deformable DETR self.bbox_embed = None self.class_embed = None # Initialize weights and apply final processing self.post_init() def forward( self, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings=None, reference_points=None, spatial_shapes=None, level_start_index=None, valid_ratios=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`): The query embeddings that are passed into the decoder. encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). 
position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Position embeddings that are added to the queries and keys in each self-attention layer. reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)` is `as_two_stage` else `(batch_size, num_queries, 2)` or , *optional*): Reference point in range `[0, 1]`, top-left (0,0), bottom-right (1, 1), including padding area. spatial_shapes (`torch.FloatTensor` of shape `(num_feature_levels, 2)`): Spatial shapes of the feature maps. level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`, *optional*): Indexes for the start of each feature level. In range `[0, sequence_length]`. valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`, *optional*): Ratio of valid area in each feature level. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is not None: hidden_states = inputs_embeds # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None intermediate = () intermediate_reference_points = () for idx, decoder_layer in enumerate(self.layers): if reference_points.shape[-1] == 4: reference_points_input = ( reference_points[:, :, None] * torch.cat([valid_ratios, valid_ratios], -1)[:, None] ) else: if reference_points.shape[-1] != 2: raise ValueError("Reference points' last dimension must be of size 2") reference_points_input = reference_points[:, :, None] * valid_ratios[:, None] if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(decoder_layer), hidden_states, encoder_hidden_states, encoder_attention_mask, None, ) else: layer_outputs = decoder_layer( hidden_states, position_embeddings=position_embeddings, encoder_hidden_states=encoder_hidden_states, reference_points=reference_points_input, spatial_shapes=spatial_shapes, level_start_index=level_start_index, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] # hack implementation for iterative bounding box refinement if self.bbox_embed is not None: tmp = self.bbox_embed[idx](hidden_states) if reference_points.shape[-1] == 4: new_reference_points = tmp + inverse_sigmoid(reference_points) new_reference_points = new_reference_points.sigmoid() else: if reference_points.shape[-1] != 2: raise ValueError( f"Reference points' last dimension must be of size 2, but is {reference_points.shape[-1]}" ) 
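                    # with 2-dimensional reference points, only the predicted (x, y) centers are offset by the
                    # previous reference points (in inverse-sigmoid space); width and height come directly from `tmp`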
new_reference_points = tmp new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid(reference_points) new_reference_points = new_reference_points.sigmoid() reference_points = new_reference_points.detach() intermediate += (hidden_states,) intermediate_reference_points += (reference_points,) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # Keep batch_size as first dimension intermediate = torch.stack(intermediate, dim=1) intermediate_reference_points = torch.stack(intermediate_reference_points, dim=1) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, intermediate, intermediate_reference_points, all_hidden_states, all_self_attns, all_cross_attentions, ] if v is not None ) return DeformableDetrDecoderOutput( last_hidden_state=hidden_states, intermediate_hidden_states=intermediate, intermediate_reference_points=intermediate_reference_points, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @add_start_docstrings( """ The bare Deformable DETR Model (consisting of a backbone and encoder-decoder Transformer) outputting raw hidden-states without any specific head on top. """, DEFORMABLE_DETR_START_DOCSTRING, ) class DeformableDetrModel(DeformableDetrPreTrainedModel): def __init__(self, config: DeformableDetrConfig): super().__init__(config) # Create backbone + positional encoding backbone = DeformableDetrConvEncoder(config) position_embeddings = build_position_encoding(config) self.backbone = DeformableDetrConvModel(backbone, position_embeddings) # Create input projection layers if config.num_feature_levels > 1: num_backbone_outs = len(backbone.intermediate_channel_sizes) input_proj_list = [] for _ in range(num_backbone_outs): in_channels = backbone.intermediate_channel_sizes[_] input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, config.d_model, kernel_size=1), nn.GroupNorm(32, config.d_model), ) ) for _ in range(config.num_feature_levels - num_backbone_outs): input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, config.d_model, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, config.d_model), ) ) in_channels = config.d_model self.input_proj = nn.ModuleList(input_proj_list) else: self.input_proj = nn.ModuleList( [ nn.Sequential( nn.Conv2d(backbone.intermediate_channel_sizes[-1], config.d_model, kernel_size=1), nn.GroupNorm(32, config.d_model), ) ] ) if not config.two_stage: self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model * 2) self.encoder = DeformableDetrEncoder(config) self.decoder = DeformableDetrDecoder(config) self.level_embed = nn.Parameter(torch.Tensor(config.num_feature_levels, config.d_model)) if config.two_stage: self.enc_output = nn.Linear(config.d_model, config.d_model) self.enc_output_norm = nn.LayerNorm(config.d_model) self.pos_trans = nn.Linear(config.d_model * 2, config.d_model * 2) self.pos_trans_norm = nn.LayerNorm(config.d_model * 2) else: self.reference_points = nn.Linear(config.d_model, 2) self.post_init() def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def freeze_backbone(self): for name, param in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(False) def unfreeze_backbone(self): for name, param in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(True) def 
get_valid_ratio(self, mask):
        """Get the valid ratio of all feature maps."""

        _, height, width = mask.shape
        valid_height = torch.sum(mask[:, :, 0], 1)
        valid_width = torch.sum(mask[:, 0, :], 1)
        valid_ratio_height = valid_height.float() / height
        valid_ratio_width = valid_width.float() / width
        valid_ratio = torch.stack([valid_ratio_width, valid_ratio_height], -1)
        return valid_ratio

    def get_proposal_pos_embed(self, proposals):
        """Get the position embedding of the proposals."""

        num_pos_feats = 128
        temperature = 10000
        scale = 2 * math.pi

        dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=proposals.device)
        # torch.div with rounding_mode="floor" replaces the deprecated tensor floor division (//)
        dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats)
        # batch_size, num_queries, 4
        proposals = proposals.sigmoid() * scale
        # batch_size, num_queries, 4, 128
        pos = proposals[:, :, :, None] / dim_t
        # batch_size, num_queries, 4, 64, 2 -> batch_size, num_queries, 512
        pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), dim=4).flatten(2)
        return pos

    def gen_encoder_output_proposals(self, enc_output, padding_mask, spatial_shapes):
        """Generate the encoder output proposals from encoded enc_output.

        Args:
            enc_output (Tensor[batch_size, sequence_length, hidden_size]): Output of the encoder.
            padding_mask (Tensor[batch_size, sequence_length]): Padding mask for `enc_output`.
            spatial_shapes (Tensor[num_feature_levels, 2]): Spatial shapes of the feature maps.

        Returns:
            `tuple(torch.FloatTensor)`: A tuple of feature map and bbox prediction.
                - object_query (Tensor[batch_size, sequence_length, hidden_size]): Object query features. Later used
                  to directly predict a bounding box (without the need of a decoder).
                - output_proposals (Tensor[batch_size, sequence_length, 4]): Normalized proposals, after an inverse
                  sigmoid.
        """
        batch_size = enc_output.shape[0]
        proposals = []
        _cur = 0
        for level, (height, width) in enumerate(spatial_shapes):
            mask_flatten_ = padding_mask[:, _cur : (_cur + height * width)].view(batch_size, height, width, 1)
            valid_height = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
            valid_width = torch.sum(~mask_flatten_[:, 0, :, 0], 1)

            grid_y, grid_x = meshgrid(
                torch.linspace(0, height - 1, height, dtype=torch.float32, device=enc_output.device),
                torch.linspace(0, width - 1, width, dtype=torch.float32, device=enc_output.device),
                indexing="ij",
            )
            grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)

            scale = torch.cat([valid_width.unsqueeze(-1), valid_height.unsqueeze(-1)], 1).view(batch_size, 1, 1, 2)
            grid = (grid.unsqueeze(0).expand(batch_size, -1, -1, -1) + 0.5) / scale
            width_height = torch.ones_like(grid) * 0.05 * (2.0**level)
            proposal = torch.cat((grid, width_height), -1).view(batch_size, -1, 4)
            proposals.append(proposal)
            _cur += height * width
        output_proposals = torch.cat(proposals, 1)
        output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)
        output_proposals = torch.log(output_proposals / (1 - output_proposals))  # inverse sigmoid
        output_proposals = output_proposals.masked_fill(padding_mask.unsqueeze(-1), float("inf"))
        output_proposals = output_proposals.masked_fill(~output_proposals_valid, float("inf"))

        # assign each pixel as an object query
        object_query = enc_output
        object_query = object_query.masked_fill(padding_mask.unsqueeze(-1), float(0))
        object_query = object_query.masked_fill(~output_proposals_valid, float(0))
        object_query = self.enc_output_norm(self.enc_output(object_query))
        return object_query, output_proposals

    @add_start_docstrings_to_model_forward(DEFORMABLE_DETR_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=DeformableDetrModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values, pixel_mask=None, decoder_attention_mask=None, encoder_outputs=None, inputs_embeds=None, decoder_inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Returns: Examples: ```python >>> from transformers import AutoImageProcessor, DeformableDetrModel >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("SenseTime/deformable-detr") >>> model = DeformableDetrModel.from_pretrained("SenseTime/deformable-detr") >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 300, 256] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, num_channels, height, width = pixel_values.shape device = pixel_values.device if pixel_mask is None: pixel_mask = torch.ones(((batch_size, height, width)), dtype=torch.long, device=device) # Extract multi-scale feature maps of same resolution `config.d_model` (cf Figure 4 in paper) # First, sent pixel_values + pixel_mask through Backbone to obtain the features # which is a list of tuples features, position_embeddings_list = self.backbone(pixel_values, pixel_mask) # Then, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default) sources = [] masks = [] for level, (source, mask) in enumerate(features): sources.append(self.input_proj[level](source)) masks.append(mask) if mask is None: raise ValueError("No attention mask was provided") # Lowest resolution feature maps are obtained via 3x3 stride 2 convolutions on the final stage if self.config.num_feature_levels > len(sources): _len_sources = len(sources) for level in range(_len_sources, self.config.num_feature_levels): if level == _len_sources: source = self.input_proj[level](features[-1][0]) else: source = self.input_proj[level](sources[-1]) mask = nn.functional.interpolate(pixel_mask[None].float(), size=source.shape[-2:]).to(torch.bool)[0] pos_l = self.backbone.position_embedding(source, mask).to(source.dtype) sources.append(source) masks.append(mask) position_embeddings_list.append(pos_l) # Create queries query_embeds = None if not self.config.two_stage: query_embeds = self.query_position_embeddings.weight # Prepare encoder inputs (by flattening) source_flatten = [] mask_flatten = [] lvl_pos_embed_flatten = [] spatial_shapes = [] for level, (source, mask, pos_embed) in enumerate(zip(sources, masks, position_embeddings_list)): batch_size, num_channels, height, width = source.shape spatial_shape = (height, width) spatial_shapes.append(spatial_shape) source = source.flatten(2).transpose(1, 2) mask = mask.flatten(1) pos_embed = pos_embed.flatten(2).transpose(1, 2) lvl_pos_embed = pos_embed + self.level_embed[level].view(1, 1, -1) lvl_pos_embed_flatten.append(lvl_pos_embed) source_flatten.append(source) mask_flatten.append(mask) source_flatten = torch.cat(source_flatten, 1) mask_flatten = torch.cat(mask_flatten, 1) lvl_pos_embed_flatten = 
torch.cat(lvl_pos_embed_flatten, 1) spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=source_flatten.device) level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1) valid_ratios = valid_ratios.float() # Fourth, sent source_flatten + mask_flatten + lvl_pos_embed_flatten (backbone + proj layer output) through encoder # Also provide spatial_shapes, level_start_index and valid_ratios if encoder_outputs is None: encoder_outputs = self.encoder( inputs_embeds=source_flatten, attention_mask=mask_flatten, position_embeddings=lvl_pos_embed_flatten, spatial_shapes=spatial_shapes, level_start_index=level_start_index, valid_ratios=valid_ratios, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # Fifth, prepare decoder inputs batch_size, _, num_channels = encoder_outputs[0].shape enc_outputs_class = None enc_outputs_coord_logits = None if self.config.two_stage: object_query_embedding, output_proposals = self.gen_encoder_output_proposals( encoder_outputs[0], ~mask_flatten, spatial_shapes ) # hack implementation for two-stage Deformable DETR # apply a detection head to each pixel (A.4 in paper) # linear projection for bounding box binary classification (i.e. foreground and background) enc_outputs_class = self.decoder.class_embed[-1](object_query_embedding) # 3-layer FFN to predict bounding boxes coordinates (bbox regression branch) delta_bbox = self.decoder.bbox_embed[-1](object_query_embedding) enc_outputs_coord_logits = delta_bbox + output_proposals # only keep top scoring `config.two_stage_num_proposals` proposals topk = self.config.two_stage_num_proposals topk_proposals = torch.topk(enc_outputs_class[..., 0], topk, dim=1)[1] topk_coords_logits = torch.gather( enc_outputs_coord_logits, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4) ) topk_coords_logits = topk_coords_logits.detach() reference_points = topk_coords_logits.sigmoid() init_reference_points = reference_points pos_trans_out = self.pos_trans_norm(self.pos_trans(self.get_proposal_pos_embed(topk_coords_logits))) query_embed, target = torch.split(pos_trans_out, num_channels, dim=2) else: query_embed, target = torch.split(query_embeds, num_channels, dim=1) query_embed = query_embed.unsqueeze(0).expand(batch_size, -1, -1) target = target.unsqueeze(0).expand(batch_size, -1, -1) reference_points = self.reference_points(query_embed).sigmoid() init_reference_points = reference_points decoder_outputs = self.decoder( inputs_embeds=target, position_embeddings=query_embed, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=mask_flatten, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, valid_ratios=valid_ratios, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: enc_outputs = tuple(value for value in [enc_outputs_class, enc_outputs_coord_logits] if value is not None) tuple_outputs = (init_reference_points,) + decoder_outputs + 
encoder_outputs + enc_outputs return tuple_outputs return DeformableDetrModelOutput( init_reference_points=init_reference_points, last_hidden_state=decoder_outputs.last_hidden_state, intermediate_hidden_states=decoder_outputs.intermediate_hidden_states, intermediate_reference_points=decoder_outputs.intermediate_reference_points, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, enc_outputs_class=enc_outputs_class, enc_outputs_coord_logits=enc_outputs_coord_logits, ) @add_start_docstrings( """ Deformable DETR Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on top, for tasks such as COCO detection. """, DEFORMABLE_DETR_START_DOCSTRING, ) class DeformableDetrForObjectDetection(DeformableDetrPreTrainedModel): # When using clones, all layers > 0 will be clones, but layer 0 *is* required _keys_to_ignore_on_load_missing = ["bbox_embed\.[1-9]\d*", "class_embed\.[1-9]\d*"] def __init__(self, config: DeformableDetrConfig): super().__init__(config) # Deformable DETR encoder-decoder model self.model = DeformableDetrModel(config) # Detection heads on top self.class_embed = nn.Linear(config.d_model, config.num_labels) self.bbox_embed = DeformableDetrMLPPredictionHead( input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3 ) prior_prob = 0.01 bias_value = -math.log((1 - prior_prob) / prior_prob) self.class_embed.bias.data = torch.ones(config.num_labels) * bias_value nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0) nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0) # if two-stage, the last class_embed and bbox_embed is for region proposal generation num_pred = (config.decoder_layers + 1) if config.two_stage else config.decoder_layers if config.with_box_refine: self.class_embed = _get_clones(self.class_embed, num_pred) self.bbox_embed = _get_clones(self.bbox_embed, num_pred) nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0) # hack implementation for iterative bounding box refinement self.model.decoder.bbox_embed = self.bbox_embed else: nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0) self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)]) self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)]) self.model.decoder.bbox_embed = None if config.two_stage: # hack implementation for two-stage self.model.decoder.class_embed = self.class_embed for box_embed in self.bbox_embed: nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0) # Initialize weights and apply final processing self.post_init() # taken from https://github.com/facebookresearch/detr/blob/master/models/detr.py @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list. 
return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] @add_start_docstrings_to_model_forward(DEFORMABLE_DETR_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DeformableDetrObjectDetectionOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values, pixel_mask=None, decoder_attention_mask=None, encoder_outputs=None, inputs_embeds=None, decoder_inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (`List[Dict]` of len `(batch_size,)`, *optional*): Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`. Returns: Examples: ```python >>> from transformers import AutoImageProcessor, DeformableDetrForObjectDetection >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("SenseTime/deformable-detr") >>> model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr") >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> # convert outputs (bounding boxes and class logits) to COCO API >>> target_sizes = torch.tensor([image.size[::-1]]) >>> results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[ ... 0 ... ] >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... box = [round(i, 2) for i in box.tolist()] ... print( ... f"Detected {model.config.id2label[label.item()]} with confidence " ... f"{round(score.item(), 3)} at location {box}" ... 
) Detected cat with confidence 0.8 at location [16.5, 52.84, 318.25, 470.78] Detected cat with confidence 0.789 at location [342.19, 24.3, 640.02, 372.25] Detected remote with confidence 0.633 at location [40.79, 72.78, 176.76, 117.25] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict # First, sent images through DETR base model to obtain encoder + decoder outputs outputs = self.model( pixel_values, pixel_mask=pixel_mask, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs.intermediate_hidden_states if return_dict else outputs[2] init_reference = outputs.init_reference_points if return_dict else outputs[0] inter_references = outputs.intermediate_reference_points if return_dict else outputs[3] # class logits + predicted bounding boxes outputs_classes = [] outputs_coords = [] for level in range(hidden_states.shape[1]): if level == 0: reference = init_reference else: reference = inter_references[:, level - 1] reference = inverse_sigmoid(reference) outputs_class = self.class_embed[level](hidden_states[:, level]) delta_bbox = self.bbox_embed[level](hidden_states[:, level]) if reference.shape[-1] == 4: outputs_coord_logits = delta_bbox + reference elif reference.shape[-1] == 2: delta_bbox[..., :2] += reference outputs_coord_logits = delta_bbox else: raise ValueError(f"reference.shape[-1] should be 4 or 2, but got {reference.shape[-1]}") outputs_coord = outputs_coord_logits.sigmoid() outputs_classes.append(outputs_class) outputs_coords.append(outputs_coord) # Keep batch_size as first dimension outputs_class = torch.stack(outputs_classes, dim=1) outputs_coord = torch.stack(outputs_coords, dim=1) logits = outputs_class[:, -1] pred_boxes = outputs_coord[:, -1] loss, loss_dict, auxiliary_outputs = None, None, None if labels is not None: # First: create the matcher matcher = DeformableDetrHungarianMatcher( class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost ) # Second: create the criterion losses = ["labels", "boxes", "cardinality"] criterion = DeformableDetrLoss( matcher=matcher, num_classes=self.config.num_labels, focal_alpha=self.config.focal_alpha, losses=losses, ) criterion.to(self.device) # Third: compute the losses, based on outputs and labels outputs_loss = {} outputs_loss["logits"] = logits outputs_loss["pred_boxes"] = pred_boxes if self.config.auxiliary_loss: auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord) outputs_loss["auxiliary_outputs"] = auxiliary_outputs if self.config.two_stage: enc_outputs_coord = outputs.enc_outputs_coord_logits.sigmoid() outputs["enc_outputs"] = {"pred_logits": outputs.enc_outputs_class, "pred_boxes": enc_outputs_coord} loss_dict = criterion(outputs_loss, labels) # Fourth: compute total loss, as a weighted sum of the various losses weight_dict = {"loss_ce": 1, "loss_bbox": self.config.bbox_loss_coefficient} weight_dict["loss_giou"] = self.config.giou_loss_coefficient if self.config.auxiliary_loss: aux_weight_dict = {} for i in range(self.config.decoder_layers - 1): aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) weight_dict.update(aux_weight_dict) loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) if not return_dict: if auxiliary_outputs is not None: output = 
(logits, pred_boxes) + auxiliary_outputs + outputs else: output = (logits, pred_boxes) + outputs tuple_outputs = ((loss, loss_dict) + output) if loss is not None else output return tuple_outputs dict_outputs = DeformableDetrObjectDetectionOutput( loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, auxiliary_outputs=auxiliary_outputs, last_hidden_state=outputs.last_hidden_state, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, intermediate_hidden_states=outputs.intermediate_hidden_states, intermediate_reference_points=outputs.intermediate_reference_points, init_reference_points=outputs.init_reference_points, enc_outputs_class=outputs.enc_outputs_class, enc_outputs_coord_logits=outputs.enc_outputs_coord_logits, ) return dict_outputs # Copied from transformers.models.detr.modeling_detr.dice_loss def dice_loss(inputs, targets, num_boxes): """ Compute the DICE loss, similar to generalized IOU for masks Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). """ inputs = inputs.sigmoid() inputs = inputs.flatten(1) numerator = 2 * (inputs * targets).sum(1) denominator = inputs.sum(-1) + targets.sum(-1) loss = 1 - (numerator + 1) / (denominator + 1) return loss.sum() / num_boxes # Copied from transformers.models.detr.modeling_detr.sigmoid_focal_loss def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2): """ Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. Args: inputs (`torch.FloatTensor` of arbitrary shape): The predictions for each example. targets (`torch.FloatTensor` with the same shape as `inputs`) A tensor storing the binary classification label for each element in the `inputs` (0 for the negative class and 1 for the positive class). alpha (`float`, *optional*, defaults to `0.25`): Optional weighting factor in the range (0,1) to balance positive vs. negative examples. gamma (`int`, *optional*, defaults to `2`): Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. Returns: Loss tensor """ prob = inputs.sigmoid() ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction="none") # add modulating factor p_t = prob * targets + (1 - prob) * (1 - targets) loss = ce_loss * ((1 - p_t) ** gamma) if alpha >= 0: alpha_t = alpha * targets + (1 - alpha) * (1 - targets) loss = alpha_t * loss return loss.mean(1).sum() / num_boxes class DeformableDetrLoss(nn.Module): """ This class computes the losses for `DeformableDetrForObjectDetection`. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box). Args: matcher (`DeformableDetrHungarianMatcher`): Module able to compute a matching between targets and proposals. num_classes (`int`): Number of object categories, omitting the special no-object category. focal_alpha (`float`): Alpha parameter in focal loss. losses (`List[str]`): List of all the losses to be applied. See `get_loss` for a list of all available losses. 
""" def __init__(self, matcher, num_classes, focal_alpha, losses): super().__init__() self.matcher = matcher self.num_classes = num_classes self.focal_alpha = focal_alpha self.losses = losses # removed logging parameter, which was part of the original implementation def loss_labels(self, outputs, targets, indices, num_boxes): """ Classification loss (Binary focal loss) targets dicts must contain the key "class_labels" containing a tensor of dim [nb_target_boxes] """ if "logits" not in outputs: raise KeyError("No logits were found in the outputs") source_logits = outputs["logits"] idx = self._get_source_permutation_idx(indices) target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full( source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device ) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros( [source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1], dtype=source_logits.dtype, layout=source_logits.layout, device=source_logits.device, ) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:, :, :-1] loss_ce = ( sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * source_logits.shape[1] ) losses = {"loss_ce": loss_ce} return losses @torch.no_grad() # Copied from transformers.models.detr.modeling_detr.DetrLoss.loss_cardinality def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes. This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients. """ logits = outputs["logits"] device = logits.device target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1) card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float()) losses = {"cardinality_error": card_err} return losses # Copied from transformers.models.detr.modeling_detr.DetrLoss.loss_boxes def loss_boxes(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss. Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. 
""" if "pred_boxes" not in outputs: raise KeyError("No predicted boxes found in outputs") idx = self._get_source_permutation_idx(indices) source_boxes = outputs["pred_boxes"][idx] target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction="none") losses = {} losses["loss_bbox"] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag( generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes)) ) losses["loss_giou"] = loss_giou.sum() / num_boxes return losses # Copied from transformers.models.detr.modeling_detr.DetrLoss._get_source_permutation_idx def _get_source_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)]) source_idx = torch.cat([source for (source, _) in indices]) return batch_idx, source_idx # Copied from transformers.models.detr.modeling_detr.DetrLoss._get_target_permutation_idx def _get_target_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)]) target_idx = torch.cat([target for (_, target) in indices]) return batch_idx, target_idx def get_loss(self, loss, outputs, targets, indices, num_boxes): loss_map = { "labels": self.loss_labels, "cardinality": self.loss_cardinality, "boxes": self.loss_boxes, } if loss not in loss_map: raise ValueError(f"Loss {loss} not supported") return loss_map[loss](outputs, targets, indices, num_boxes) def forward(self, outputs, targets): """ This performs the loss computation. Args: outputs (`dict`, *optional*): Dictionary of tensors, see the output specification of the model for the format. targets (`List[dict]`, *optional*): List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the losses applied, see each loss' doc. """ outputs_without_aux = {k: v for k, v in outputs.items() if k != "auxiliary_outputs"} # Retrieve the matching between the outputs of the last layer and the targets indices = self.matcher(outputs_without_aux, targets) # Compute the average number of target boxes accross all nodes, for normalization purposes num_boxes = sum(len(t["class_labels"]) for t in targets) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) # (Niels): comment out function below, distributed training to be added # if is_dist_avail_and_initialized(): # torch.distributed.all_reduce(num_boxes) # (Niels) in original implementation, num_boxes is divided by get_world_size() num_boxes = torch.clamp(num_boxes, min=1).item() # Compute all the requested losses losses = {} for loss in self.losses: losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. 
if "auxiliary_outputs" in outputs: for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]): indices = self.matcher(auxiliary_outputs, targets) for loss in self.losses: l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes) l_dict = {k + f"_{i}": v for k, v in l_dict.items()} losses.update(l_dict) if "enc_outputs" in outputs: enc_outputs = outputs["enc_outputs"] bin_targets = copy.deepcopy(targets) for bt in bin_targets: bt["labels"] = torch.zeros_like(bt["labels"]) indices = self.matcher(enc_outputs, bin_targets) for loss in self.losses: kwargs = {} if loss == "labels": # Logging is enabled only for the last layer kwargs["log"] = False l_dict = self.get_loss(loss, enc_outputs, bin_targets, indices, num_boxes, **kwargs) l_dict = {k + "_enc": v for k, v in l_dict.items()} losses.update(l_dict) return losses # Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead class DeformableDetrMLPPredictionHead(nn.Module): """ Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates, height and width of a bounding box w.r.t. an image. Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py """ def __init__(self, input_dim, hidden_dim, output_dim, num_layers): super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) def forward(self, x): for i, layer in enumerate(self.layers): x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x class DeformableDetrHungarianMatcher(nn.Module): """ This class computes an assignment between the targets and the predictions of the network. For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are un-matched (and thus treated as non-objects). Args: class_cost: The relative weight of the classification error in the matching cost. bbox_cost: The relative weight of the L1 error of the bounding box coordinates in the matching cost. giou_cost: The relative weight of the giou loss of the bounding box in the matching cost. """ def __init__(self, class_cost: float = 1, bbox_cost: float = 1, giou_cost: float = 1): super().__init__() requires_backends(self, ["scipy"]) self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost if class_cost == 0 and bbox_cost == 0 and giou_cost == 0: raise ValueError("All costs of the Matcher can't be 0") @torch.no_grad() def forward(self, outputs, targets): """ Args: outputs (`dict`): A dictionary that contains at least these entries: * "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits * "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates. targets (`List[dict]`): A list of targets (len(targets) = batch_size), where each target is a dict containing: * "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels * "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates. 
Returns: `List[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where: - index_i is the indices of the selected predictions (in order) - index_j is the indices of the corresponding selected targets (in order) For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes) """ batch_size, num_queries = outputs["logits"].shape[:2] # We flatten to compute the cost matrices in a batch out_prob = outputs["logits"].flatten(0, 1).sigmoid() # [batch_size * num_queries, num_classes] out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] # Also concat the target labels and boxes target_ids = torch.cat([v["class_labels"] for v in targets]) target_bbox = torch.cat([v["boxes"] for v in targets]) # Compute the classification cost. alpha = 0.25 gamma = 2.0 neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log()) pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log()) class_cost = pos_cost_class[:, target_ids] - neg_cost_class[:, target_ids] # Compute the L1 cost between boxes bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) # Compute the giou cost between boxes giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) # Final cost matrix cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu() sizes = [len(v["boxes"]) for v in targets] indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))] return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] # Copied from transformers.models.detr.modeling_detr._upcast def _upcast(t: Tensor) -> Tensor: # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type if t.is_floating_point(): return t if t.dtype in (torch.float32, torch.float64) else t.float() else: return t if t.dtype in (torch.int32, torch.int64) else t.int() # Copied from transformers.models.detr.modeling_detr.box_area def box_area(boxes: Tensor) -> Tensor: """ Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates. Args: boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`): Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1 < x2` and `0 <= y1 < y2`. Returns: `torch.FloatTensor`: a tensor containing the area for each box. """ boxes = _upcast(boxes) return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) # Copied from transformers.models.detr.modeling_detr.box_iou def box_iou(boxes1, boxes2): area1 = box_area(boxes1) area2 = box_area(boxes2) left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] width_height = (right_bottom - left_top).clamp(min=0) # [N,M,2] inter = width_height[:, :, 0] * width_height[:, :, 1] # [N,M] union = area1[:, None] + area2 - inter iou = inter / union return iou, union # Copied from transformers.models.detr.modeling_detr.generalized_box_iou def generalized_box_iou(boxes1, boxes2): """ Generalized IoU from https://giou.stanford.edu/. The boxes should be in [x0, y0, x1, y1] (corner) format. 
Returns: `torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2) """ # degenerate boxes gives inf / nan results # so do an early check if not (boxes1[:, 2:] >= boxes1[:, :2]).all(): raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}") if not (boxes2[:, 2:] >= boxes2[:, :2]).all(): raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}") iou, union = box_iou(boxes1, boxes2) top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2]) bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) width_height = (bottom_right - top_left).clamp(min=0) # [N,M,2] area = width_height[:, :, 0] * width_height[:, :, 1] return iou - (area - union) / area # Copied from transformers.models.detr.modeling_detr._max_by_axis def _max_by_axis(the_list): # type: (List[List[int]]) -> List[int] maxes = the_list[0] for sublist in the_list[1:]: for index, item in enumerate(sublist): maxes[index] = max(maxes[index], item) return maxes # Copied from transformers.models.detr.modeling_detr.NestedTensor class NestedTensor(object): def __init__(self, tensors, mask: Optional[Tensor]): self.tensors = tensors self.mask = mask def to(self, device): cast_tensor = self.tensors.to(device) mask = self.mask if mask is not None: cast_mask = mask.to(device) else: cast_mask = None return NestedTensor(cast_tensor, cast_mask) def decompose(self): return self.tensors, self.mask def __repr__(self): return str(self.tensors) # Copied from transformers.models.detr.modeling_detr.nested_tensor_from_tensor_list def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): if tensor_list[0].ndim == 3: max_size = _max_by_axis([list(img.shape) for img in tensor_list]) batch_shape = [len(tensor_list)] + max_size batch_size, num_channels, height, width = batch_shape dtype = tensor_list[0].dtype device = tensor_list[0].device tensor = torch.zeros(batch_shape, dtype=dtype, device=device) mask = torch.ones((batch_size, height, width), dtype=torch.bool, device=device) for img, pad_img, m in zip(tensor_list, tensor, mask): pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) m[: img.shape[1], : img.shape[2]] = False else: raise ValueError("Only 3-dimensional tensors are supported") return NestedTensor(tensor, mask)
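# --- A minimal, hedged sketch (not part of either source file in this record). ---
# The DeformableDetrSinePositionEmbedding defined further below computes
#     dim_t = temperature ** (2 * floor(dim_t / 2) / embedding_dim)
# where the floor division used to be written as `dim_t // 2` on a tensor. The snippet
# shows the same computation without the deprecated tensor `//` operator, assuming
# PyTorch >= 1.8 (where `rounding_mode="floor"` is available); the `torch_int_div`
# helper imported from `transformers.pytorch_utils` in the file below is used for the
# same floor division inside the model code. The values 64 and 10000 are the class defaults.
import torch

embedding_dim, temperature = 64, 10000
dim_t = torch.arange(embedding_dim, dtype=torch.float32)
# floor-divide the channel indices by 2 without using the deprecated `tensor // int` form
dim_t_floored = torch.div(dim_t, 2, rounding_mode="floor")
dim_t = temperature ** (2 * dim_t_floored / embedding_dim)  # shape: (embedding_dim,)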
# coding=utf-8 # Copyright 2022 SenseTime and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Deformable DETR model.""" import copy import math import warnings from dataclasses import dataclass from typing import Dict, List, Optional, Tuple import torch import torch.nn.functional as F from torch import Tensor, nn from torch.autograd import Function from torch.autograd.function import once_differentiable from ...activations import ACT2FN from ...file_utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, is_scipy_available, is_timm_available, is_torch_cuda_available, is_vision_available, replace_return_docstrings, requires_backends, ) from ...modeling_outputs import BaseModelOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import meshgrid, torch_int_div from ...utils import is_ninja_available, logging from ..auto import AutoBackbone from .configuration_deformable_detr import DeformableDetrConfig from .load_custom import load_cuda_kernels logger = logging.get_logger(__name__) # Move this to not compile only when importing, this needs to happen later, like in __init__. if is_torch_cuda_available() and is_ninja_available(): logger.info("Loading custom CUDA kernels...") try: MultiScaleDeformableAttention = load_cuda_kernels() except Exception as e: logger.warning(f"Could not load the custom kernel for multi-scale deformable attention: {e}") MultiScaleDeformableAttention = None else: MultiScaleDeformableAttention = None if is_vision_available(): from transformers.image_transforms import center_to_corners_format class MultiScaleDeformableAttentionFunction(Function): @staticmethod def forward( context, value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, im2col_step, ): context.im2col_step = im2col_step output = MultiScaleDeformableAttention.ms_deform_attn_forward( value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, context.im2col_step, ) context.save_for_backward( value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights ) return output @staticmethod @once_differentiable def backward(context, grad_output): ( value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, ) = context.saved_tensors grad_value, grad_sampling_loc, grad_attn_weight = MultiScaleDeformableAttention.ms_deform_attn_backward( value, value_spatial_shapes, value_level_start_index, sampling_locations, attention_weights, grad_output, context.im2col_step, ) return grad_value, None, None, grad_sampling_loc, grad_attn_weight, None if is_scipy_available(): from scipy.optimize import linear_sum_assignment if is_timm_available(): from timm import create_model logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "DeformableDetrConfig" _CHECKPOINT_FOR_DOC = "sensetime/deformable-detr" DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = [ "sensetime/deformable-detr", # See all Deformable DETR 
models at https://huggingface.co/models?filter=deformable-detr ] @dataclass class DeformableDetrDecoderOutput(ModelOutput): """ Base class for outputs of the DeformableDetrDecoder. This class adds two attributes to BaseModelOutputWithCrossAttentions, namely: - a stacked tensor of intermediate decoder hidden states (i.e. the output of each decoder layer) - a stacked tensor of intermediate reference points. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`): Stacked intermediate hidden states (output of each layer of the decoder). intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, sequence_length, hidden_size)`): Stacked intermediate reference points (reference points of each layer of the decoder). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. """ last_hidden_state: torch.FloatTensor = None intermediate_hidden_states: torch.FloatTensor = None intermediate_reference_points: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class DeformableDetrModelOutput(ModelOutput): """ Base class for outputs of the Deformable DETR encoder-decoder model. Args: init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Initial reference points sent through the Transformer decoder. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`): Stacked intermediate hidden states (output of each layer of the decoder). intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`): Stacked intermediate reference points (reference points of each layer of the decoder). 
decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, num_queries, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, num_queries, num_queries)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are picked as region proposals in the first stage. Output of bounding box binary classification (i.e. foreground and background). enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): Logits of predicted bounding boxes coordinates in the first stage. 
""" init_reference_points: torch.FloatTensor = None last_hidden_state: torch.FloatTensor = None intermediate_hidden_states: torch.FloatTensor = None intermediate_reference_points: torch.FloatTensor = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None enc_outputs_class: Optional[torch.FloatTensor] = None enc_outputs_coord_logits: Optional[torch.FloatTensor] = None @dataclass class DeformableDetrObjectDetectionOutput(ModelOutput): """ Output type of [`DeformableDetrForObjectDetection`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)): Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss. loss_dict (`Dict`, *optional*): A dictionary containing the individual losses. Useful for logging. logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): Classification logits (including no-object) for all queries. pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). You can use [`~DeformableDetrProcessor.post_process_object_detection`] to retrieve the unnormalized bounding boxes. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxilary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and `pred_boxes`) for each decoder layer. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the decoder of the model. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, num_queries, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, num_queries, num_queries)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. 
encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_heads, 4, 4)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`): Stacked intermediate hidden states (output of each layer of the decoder). intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`): Stacked intermediate reference points (reference points of each layer of the decoder). init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Initial reference points sent through the Transformer decoder. enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are picked as region proposals in the first stage. Output of bounding box binary classification (i.e. foreground and background). enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): Logits of predicted bounding boxes coordinates in the first stage. 
""" loss: Optional[torch.FloatTensor] = None loss_dict: Optional[Dict] = None logits: torch.FloatTensor = None pred_boxes: torch.FloatTensor = None auxiliary_outputs: Optional[List[Dict]] = None init_reference_points: Optional[torch.FloatTensor] = None last_hidden_state: Optional[torch.FloatTensor] = None intermediate_hidden_states: Optional[torch.FloatTensor] = None intermediate_reference_points: Optional[torch.FloatTensor] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None enc_outputs_class: Optional = None enc_outputs_coord_logits: Optional = None def _get_clones(module, N): return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) def inverse_sigmoid(x, eps=1e-5): x = x.clamp(min=0, max=1) x1 = x.clamp(min=eps) x2 = (1 - x).clamp(min=eps) return torch.log(x1 / x2) # Copied from transformers.models.detr.modeling_detr.DetrFrozenBatchNorm2d with Detr->DeformableDetr class DeformableDetrFrozenBatchNorm2d(nn.Module): """ BatchNorm2d where the batch statistics and the affine parameters are fixed. Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than torchvision.models.resnet[18,34,50,101] produce nans. """ def __init__(self, n): super().__init__() self.register_buffer("weight", torch.ones(n)) self.register_buffer("bias", torch.zeros(n)) self.register_buffer("running_mean", torch.zeros(n)) self.register_buffer("running_var", torch.ones(n)) def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ): num_batches_tracked_key = prefix + "num_batches_tracked" if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] super()._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ) def forward(self, x): # move reshapes to the beginning # to make it user-friendly weight = self.weight.reshape(1, -1, 1, 1) bias = self.bias.reshape(1, -1, 1, 1) running_var = self.running_var.reshape(1, -1, 1, 1) running_mean = self.running_mean.reshape(1, -1, 1, 1) epsilon = 1e-5 scale = weight * (running_var + epsilon).rsqrt() bias = bias - running_mean * scale return x * scale + bias # Copied from transformers.models.detr.modeling_detr.replace_batch_norm with Detr->DeformableDetr def replace_batch_norm(m, name=""): for attr_str in dir(m): target_attr = getattr(m, attr_str) if isinstance(target_attr, nn.BatchNorm2d): frozen = DeformableDetrFrozenBatchNorm2d(target_attr.num_features) bn = getattr(m, attr_str) frozen.weight.data.copy_(bn.weight) frozen.bias.data.copy_(bn.bias) frozen.running_mean.data.copy_(bn.running_mean) frozen.running_var.data.copy_(bn.running_var) setattr(m, attr_str, frozen) for n, ch in m.named_children(): replace_batch_norm(ch, n) class DeformableDetrConvEncoder(nn.Module): """ Convolutional backbone, using either the AutoBackbone API or one from the timm library. nn.BatchNorm2d layers are replaced by DeformableDetrFrozenBatchNorm2d as defined above. 
""" def __init__(self, config): super().__init__() self.config = config if config.use_timm_backbone: requires_backends(self, ["timm"]) kwargs = {} if config.dilation: kwargs["output_stride"] = 16 backbone = create_model( config.backbone, pretrained=config.use_pretrained_backbone, features_only=True, out_indices=(2, 3, 4) if config.num_feature_levels > 1 else (4,), in_chans=config.num_channels, **kwargs, ) else: backbone = AutoBackbone.from_config(config.backbone_config) # replace batch norm by frozen batch norm with torch.no_grad(): replace_batch_norm(backbone) self.model = backbone self.intermediate_channel_sizes = ( self.model.feature_info.channels() if config.use_timm_backbone else self.model.channels ) backbone_model_type = config.backbone if config.use_timm_backbone else config.backbone_config.model_type if "resnet" in backbone_model_type: for name, parameter in self.model.named_parameters(): if config.use_timm_backbone: if "layer2" not in name and "layer3" not in name and "layer4" not in name: parameter.requires_grad_(False) else: if "stage.1" not in name and "stage.2" not in name and "stage.3" not in name: parameter.requires_grad_(False) # Copied from transformers.models.detr.modeling_detr.DetrConvEncoder.forward with Detr->DeformableDetr def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): # send pixel_values through the model to get list of feature maps features = self.model(pixel_values) if self.config.use_timm_backbone else self.model(pixel_values).feature_maps out = [] for feature_map in features: # downsample pixel_mask to match shape of corresponding feature_map mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0] out.append((feature_map, mask)) return out # Copied from transformers.models.detr.modeling_detr.DetrConvModel with Detr->DeformableDetr class DeformableDetrConvModel(nn.Module): """ This module adds 2D position embeddings to all intermediate feature maps of the convolutional encoder. """ def __init__(self, conv_encoder, position_embedding): super().__init__() self.conv_encoder = conv_encoder self.position_embedding = position_embedding def forward(self, pixel_values, pixel_mask): # send pixel_values and pixel_mask through backbone to get list of (feature_map, pixel_mask) tuples out = self.conv_encoder(pixel_values, pixel_mask) pos = [] for feature_map, mask in out: # position encoding pos.append(self.position_embedding(feature_map, mask).to(feature_map.dtype)) return out, pos # Copied from transformers.models.detr.modeling_detr._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, target_len: Optional[int] = None): """ Expands attention_mask from `[batch_size, seq_len]` to `[batch_size, 1, target_seq_len, source_seq_len]`. """ batch_size, source_len = mask.size() target_len = target_len if target_len is not None else source_len expanded_mask = mask[:, None, None, :].expand(batch_size, 1, target_len, source_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min) class DeformableDetrSinePositionEmbedding(nn.Module): """ This is a more standard version of the position embedding, very similar to the one used by the Attention is all you need paper, generalized to work on images. 
""" def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None): super().__init__() self.embedding_dim = embedding_dim self.temperature = temperature self.normalize = normalize if scale is not None and normalize is False: raise ValueError("normalize should be True if scale is passed") if scale is None: scale = 2 * math.pi self.scale = scale def forward(self, pixel_values, pixel_mask): if pixel_mask is None: raise ValueError("No pixel mask provided") y_embed = pixel_mask.cumsum(1, dtype=torch.float32) x_embed = pixel_mask.cumsum(2, dtype=torch.float32) if self.normalize: eps = 1e-6 y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale dim_t = torch.arange(self.embedding_dim, dtype=torch.float32, device=pixel_values.device) dim_t = self.temperature ** (2 * torch_int_div(dim_t, 2 / self.embedding_dim)) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) return pos # Copied from transformers.models.detr.modeling_detr.DetrLearnedPositionEmbedding class DeformableDetrLearnedPositionEmbedding(nn.Module): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, embedding_dim=256): super().__init__() self.row_embeddings = nn.Embedding(50, embedding_dim) self.column_embeddings = nn.Embedding(50, embedding_dim) def forward(self, pixel_values, pixel_mask=None): height, width = pixel_values.shape[-2:] width_values = torch.arange(width, device=pixel_values.device) height_values = torch.arange(height, device=pixel_values.device) x_emb = self.column_embeddings(width_values) y_emb = self.row_embeddings(height_values) pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1) pos = pos.permute(2, 0, 1) pos = pos.unsqueeze(0) pos = pos.repeat(pixel_values.shape[0], 1, 1, 1) return pos # Copied from transformers.models.detr.modeling_detr.build_position_encoding with Detr->DeformableDetr def build_position_encoding(config): n_steps = config.d_model // 2 if config.position_embedding_type == "sine": # TODO find a better way of exposing other arguments position_embedding = DeformableDetrSinePositionEmbedding(n_steps, normalize=True) elif config.position_embedding_type == "learned": position_embedding = DeformableDetrLearnedPositionEmbedding(n_steps) else: raise ValueError(f"Not supported {config.position_embedding_type}") return position_embedding def multi_scale_deformable_attention( value: Tensor, value_spatial_shapes: Tensor, sampling_locations: Tensor, attention_weights: Tensor ) -> Tensor: batch_size, _, num_heads, hidden_dim = value.shape _, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape value_list = value.split([height * width for height, width in value_spatial_shapes], dim=1) sampling_grids = 2 * sampling_locations - 1 sampling_value_list = [] for level_id, (height, width) in enumerate(value_spatial_shapes): # batch_size, height*width, num_heads, hidden_dim # -> batch_size, height*width, num_heads*hidden_dim # -> batch_size, num_heads*hidden_dim, height*width # -> batch_size*num_heads, hidden_dim, height, width value_l_ = ( value_list[level_id].flatten(2).transpose(1, 2).reshape(batch_size * num_heads, hidden_dim, 
height, width) ) # batch_size, num_queries, num_heads, num_points, 2 # -> batch_size, num_heads, num_queries, num_points, 2 # -> batch_size*num_heads, num_queries, num_points, 2 sampling_grid_l_ = sampling_grids[:, :, :, level_id].transpose(1, 2).flatten(0, 1) # batch_size*num_heads, hidden_dim, num_queries, num_points sampling_value_l_ = nn.functional.grid_sample( value_l_, sampling_grid_l_, mode="bilinear", padding_mode="zeros", align_corners=False ) sampling_value_list.append(sampling_value_l_) # (batch_size, num_queries, num_heads, num_levels, num_points) # -> (batch_size, num_heads, num_queries, num_levels, num_points) # -> (batch_size, num_heads, 1, num_queries, num_levels*num_points) attention_weights = attention_weights.transpose(1, 2).reshape( batch_size * num_heads, 1, num_queries, num_levels * num_points ) output = ( (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights) .sum(-1) .view(batch_size, num_heads * hidden_dim, num_queries) ) return output.transpose(1, 2).contiguous() class DeformableDetrMultiscaleDeformableAttention(nn.Module): """ Multiscale deformable attention as proposed in Deformable DETR. """ def __init__(self, embed_dim: int, num_heads: int, n_levels: int, n_points: int): super().__init__() if embed_dim % num_heads != 0: raise ValueError( f"embed_dim (d_model) must be divisible by num_heads, but got {embed_dim} and {num_heads}" ) dim_per_head = embed_dim // num_heads # check if dim_per_head is power of 2 if not ((dim_per_head & (dim_per_head - 1) == 0) and dim_per_head != 0): warnings.warn( "You'd better set embed_dim (d_model) in DeformableDetrMultiscaleDeformableAttention to make the" " dimension of each attention head a power of 2 which is more efficient in the authors' CUDA" " implementation." ) self.im2col_step = 64 self.d_model = embed_dim self.n_levels = n_levels self.n_heads = num_heads self.n_points = n_points self.sampling_offsets = nn.Linear(embed_dim, num_heads * n_levels * n_points * 2) self.attention_weights = nn.Linear(embed_dim, num_heads * n_levels * n_points) self.value_proj = nn.Linear(embed_dim, embed_dim) self.output_proj = nn.Linear(embed_dim, embed_dim) self._reset_parameters() def _reset_parameters(self): nn.init.constant_(self.sampling_offsets.weight.data, 0.0) thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads) grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) grid_init = ( (grid_init / grid_init.abs().max(-1, keepdim=True)[0]) .view(self.n_heads, 1, 1, 2) .repeat(1, self.n_levels, self.n_points, 1) ) for i in range(self.n_points): grid_init[:, :, i, :] *= i + 1 with torch.no_grad(): self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1)) nn.init.constant_(self.attention_weights.weight.data, 0.0) nn.init.constant_(self.attention_weights.bias.data, 0.0) nn.init.xavier_uniform_(self.value_proj.weight.data) nn.init.constant_(self.value_proj.bias.data, 0.0) nn.init.xavier_uniform_(self.output_proj.weight.data) nn.init.constant_(self.output_proj.bias.data, 0.0) def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): return tensor if position_embeddings is None else tensor + position_embeddings def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings: Optional[torch.Tensor] = None, reference_points=None, spatial_shapes=None, level_start_index=None, output_attentions: bool = False, ): # add position embeddings to the hidden 
states before projecting to queries and keys if position_embeddings is not None: hidden_states = self.with_pos_embed(hidden_states, position_embeddings) batch_size, num_queries, _ = hidden_states.shape batch_size, sequence_length, _ = encoder_hidden_states.shape if (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() != sequence_length: raise ValueError( "Make sure to align the spatial shapes with the sequence length of the encoder hidden states" ) value = self.value_proj(encoder_hidden_states) if attention_mask is not None: # we invert the attention_mask value = value.masked_fill(~attention_mask[..., None], float(0)) value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads) sampling_offsets = self.sampling_offsets(hidden_states).view( batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2 ) attention_weights = self.attention_weights(hidden_states).view( batch_size, num_queries, self.n_heads, self.n_levels * self.n_points ) attention_weights = F.softmax(attention_weights, -1).view( batch_size, num_queries, self.n_heads, self.n_levels, self.n_points ) # batch_size, num_queries, n_heads, n_levels, n_points, 2 if reference_points.shape[-1] == 2: offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1) sampling_locations = ( reference_points[:, :, None, :, None, :] + sampling_offsets / offset_normalizer[None, None, None, :, None, :] ) elif reference_points.shape[-1] == 4: sampling_locations = ( reference_points[:, :, None, :, None, :2] + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5 ) else: raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}") try: # custom kernel output = MultiScaleDeformableAttentionFunction.apply( value, spatial_shapes, level_start_index, sampling_locations, attention_weights, self.im2col_step, ) except Exception: # PyTorch implementation output = multi_scale_deformable_attention(value, spatial_shapes, sampling_locations, attention_weights) output = self.output_proj(output) return output, attention_weights class DeformableDetrMultiheadAttention(nn.Module): """ Multi-headed attention from 'Attention Is All You Need' paper. Here, we add position embeddings to the queries and keys (as explained in the Deformable DETR paper). """ def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {num_heads})." 
) self.scaling = self.head_dim**-0.5 self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): return tensor if position_embeddings is None else tensor + position_embeddings def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_embeddings: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" batch_size, target_len, embed_dim = hidden_states.size() # add position embeddings to the hidden states before projecting to queries and keys if position_embeddings is not None: hidden_states_original = hidden_states hidden_states = self.with_pos_embed(hidden_states, position_embeddings) # get queries, keys and values query_states = self.q_proj(hidden_states) * self.scaling key_states = self._shape(self.k_proj(hidden_states), -1, batch_size) value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size) proj_shape = (batch_size * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) source_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (batch_size * self.num_heads, target_len, source_len): raise ValueError( f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is" f" {attn_weights.size()}" ) # expand attention_mask if attention_mask is not None: # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] attention_mask = _expand_mask(attention_mask, hidden_states.dtype) if attention_mask is not None: if attention_mask.size() != (batch_size, 1, target_len, source_len): raise ValueError( f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is" f" {attention_mask.size()}" ) attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. 
# In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(batch_size, target_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped class DeformableDetrEncoderLayer(nn.Module): def __init__(self, config: DeformableDetrConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = DeformableDetrMultiscaleDeformableAttention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, n_levels=config.num_feature_levels, n_points=config.encoder_n_points, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: torch.Tensor = None, reference_points=None, spatial_shapes=None, level_start_index=None, output_attentions: bool = False, ): """ Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Input to the layer. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Attention mask. position_embeddings (`torch.FloatTensor`, *optional*): Position embeddings, to be added to `hidden_states`. reference_points (`torch.FloatTensor`, *optional*): Reference points. spatial_shapes (`torch.LongTensor`, *optional*): Spatial shapes of the backbone feature maps. level_start_index (`torch.LongTensor`, *optional*): Level start index. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states # Apply Multi-scale Deformable Attention Module on the multi-scale feature maps. 
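        # Note: here the deformable attention is used as self-attention. The flattened multi-scale feature map acts
        # as both the queries and the values, and every query only samples a small number of points around its own
        # reference point on each feature level instead of attending to all pixels.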
hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if self.training: if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class DeformableDetrDecoderLayer(nn.Module): def __init__(self, config: DeformableDetrConfig): super().__init__() self.embed_dim = config.d_model # self-attention self.self_attn = DeformableDetrMultiheadAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) # cross-attention self.encoder_attn = DeformableDetrMultiscaleDeformableAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, n_levels=config.num_feature_levels, n_points=config.decoder_n_points, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) # feedforward neural networks self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, position_embeddings: Optional[torch.Tensor] = None, reference_points=None, spatial_shapes=None, level_start_index=None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ): """ Args: hidden_states (`torch.FloatTensor`): Input to the layer of shape `(seq_len, batch, embed_dim)`. position_embeddings (`torch.FloatTensor`, *optional*): Position embeddings that are added to the queries and keys in the self-attention layer. reference_points (`torch.FloatTensor`, *optional*): Reference points. spatial_shapes (`torch.LongTensor`, *optional*): Spatial shapes. level_start_index (`torch.LongTensor`, *optional*): Level start index. encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, position_embeddings=position_embeddings, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) second_residual = hidden_states # Cross-Attention cross_attn_weights = None hidden_states, cross_attn_weights = self.encoder_attn( hidden_states=hidden_states, attention_mask=encoder_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = second_residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs # Copied from transformers.models.detr.modeling_detr.DetrClassificationHead class DeformableDetrClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, hidden_states: torch.Tensor): hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states class DeformableDetrPreTrainedModel(PreTrainedModel): config_class = DeformableDetrConfig base_model_prefix = "model" main_input_name = "pixel_values" def _init_weights(self, module): std = self.config.init_std if isinstance(module, DeformableDetrLearnedPositionEmbedding): nn.init.uniform_(module.row_embeddings.weight) nn.init.uniform_(module.column_embeddings.weight) elif isinstance(module, DeformableDetrMultiscaleDeformableAttention): module._reset_parameters() elif isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() if hasattr(module, "reference_points") and not self.config.two_stage: nn.init.xavier_uniform_(module.reference_points.weight.data, gain=1.0) nn.init.constant_(module.reference_points.bias.data, 0.0) if hasattr(module, "level_embed"): nn.init.normal_(module.level_embed) def 
_set_gradient_checkpointing(self, module, value=False): if isinstance(module, DeformableDetrDecoder): module.gradient_checkpointing = value DEFORMABLE_DETR_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`DeformableDetrConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ DEFORMABLE_DETR_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`DeformableDetrImageProcessor.__call__`] for details. pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, num_queries)`, *optional*): Not used by default. Can be used to mask object queries. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you can choose to directly pass a flattened representation of an image. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an embedded representation. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ class DeformableDetrEncoder(DeformableDetrPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* deformable attention layers. Each layer is a [`DeformableDetrEncoderLayer`]. The encoder updates the flattened multi-scale feature maps through multiple deformable attention layers. 
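
    Instead of dense self-attention over every pixel, each deformable attention layer lets a query attend to a small,
    fixed number of sampled points around its reference point on every feature level, which keeps the cost roughly
    linear in the total number of feature map pixels.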
Args: config: DeformableDetrConfig """ def __init__(self, config: DeformableDetrConfig): super().__init__(config) self.dropout = config.dropout self.layers = nn.ModuleList([DeformableDetrEncoderLayer(config) for _ in range(config.encoder_layers)]) # Initialize weights and apply final processing self.post_init() @staticmethod def get_reference_points(spatial_shapes, valid_ratios, device): """ Get reference points for each feature map. Used in decoder. Args: spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`): Spatial shapes of each feature map. valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`): Valid ratios of each feature map. device (`torch.device`): Device on which to create the tensors. Returns: `torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)` """ reference_points_list = [] for level, (height, width) in enumerate(spatial_shapes): ref_y, ref_x = meshgrid( torch.linspace(0.5, height - 0.5, height, dtype=torch.float32, device=device), torch.linspace(0.5, width - 0.5, width, dtype=torch.float32, device=device), indexing="ij", ) # TODO: valid_ratios could be useless here. check https://github.com/fundamentalvision/Deformable-DETR/issues/36 ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, level, 1] * height) ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, level, 0] * width) ref = torch.stack((ref_x, ref_y), -1) reference_points_list.append(ref) reference_points = torch.cat(reference_points_list, 1) reference_points = reference_points[:, :, None] * valid_ratios[:, None] return reference_points def forward( self, inputs_embeds=None, attention_mask=None, position_embeddings=None, spatial_shapes=None, level_start_index=None, valid_ratios=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Flattened feature map (output of the backbone + projection layer) that is passed to the encoder. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`: - 1 for pixel features that are real (i.e. **not masked**), - 0 for pixel features that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Position embeddings that are added to the queries and keys in each self-attention layer. spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`): Spatial shapes of each feature map. level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`): Starting index of each feature map. valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`): Ratio of valid area in each feature level. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = inputs_embeds hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=inputs_embeds.device) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer( hidden_states, attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class DeformableDetrDecoder(DeformableDetrPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DeformableDetrDecoderLayer`]. The decoder updates the query embeddings through multiple self-attention and cross-attention layers. Some tweaks for Deformable DETR: - `position_embeddings`, `reference_points`, `spatial_shapes` and `valid_ratios` are added to the forward pass. - it also returns a stack of intermediate outputs and reference points from all decoding layers. Args: config: DeformableDetrConfig """ def __init__(self, config: DeformableDetrConfig): super().__init__(config) self.dropout = config.dropout self.layers = nn.ModuleList([DeformableDetrDecoderLayer(config) for _ in range(config.decoder_layers)]) self.gradient_checkpointing = False # hack implementation for iterative bounding box refinement and two-stage Deformable DETR self.bbox_embed = None self.class_embed = None # Initialize weights and apply final processing self.post_init() def forward( self, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings=None, reference_points=None, spatial_shapes=None, level_start_index=None, valid_ratios=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`): The query embeddings that are passed into the decoder. encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). 
position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Position embeddings that are added to the queries and keys in each self-attention layer. reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)` is `as_two_stage` else `(batch_size, num_queries, 2)` or , *optional*): Reference point in range `[0, 1]`, top-left (0,0), bottom-right (1, 1), including padding area. spatial_shapes (`torch.FloatTensor` of shape `(num_feature_levels, 2)`): Spatial shapes of the feature maps. level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`, *optional*): Indexes for the start of each feature level. In range `[0, sequence_length]`. valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`, *optional*): Ratio of valid area in each feature level. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is not None: hidden_states = inputs_embeds # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None intermediate = () intermediate_reference_points = () for idx, decoder_layer in enumerate(self.layers): if reference_points.shape[-1] == 4: reference_points_input = ( reference_points[:, :, None] * torch.cat([valid_ratios, valid_ratios], -1)[:, None] ) else: if reference_points.shape[-1] != 2: raise ValueError("Reference points' last dimension must be of size 2") reference_points_input = reference_points[:, :, None] * valid_ratios[:, None] if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(decoder_layer), hidden_states, encoder_hidden_states, encoder_attention_mask, None, ) else: layer_outputs = decoder_layer( hidden_states, position_embeddings=position_embeddings, encoder_hidden_states=encoder_hidden_states, reference_points=reference_points_input, spatial_shapes=spatial_shapes, level_start_index=level_start_index, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] # hack implementation for iterative bounding box refinement if self.bbox_embed is not None: tmp = self.bbox_embed[idx](hidden_states) if reference_points.shape[-1] == 4: new_reference_points = tmp + inverse_sigmoid(reference_points) new_reference_points = new_reference_points.sigmoid() else: if reference_points.shape[-1] != 2: raise ValueError( f"Reference points' last dimension must be of size 2, but is {reference_points.shape[-1]}" ) 
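                    # with 2-d reference points, only the (x, y) center is refined against the previous reference;
                    # width and height come directly from the bbox head prediction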
new_reference_points = tmp new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid(reference_points) new_reference_points = new_reference_points.sigmoid() reference_points = new_reference_points.detach() intermediate += (hidden_states,) intermediate_reference_points += (reference_points,) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # Keep batch_size as first dimension intermediate = torch.stack(intermediate, dim=1) intermediate_reference_points = torch.stack(intermediate_reference_points, dim=1) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, intermediate, intermediate_reference_points, all_hidden_states, all_self_attns, all_cross_attentions, ] if v is not None ) return DeformableDetrDecoderOutput( last_hidden_state=hidden_states, intermediate_hidden_states=intermediate, intermediate_reference_points=intermediate_reference_points, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @add_start_docstrings( """ The bare Deformable DETR Model (consisting of a backbone and encoder-decoder Transformer) outputting raw hidden-states without any specific head on top. """, DEFORMABLE_DETR_START_DOCSTRING, ) class DeformableDetrModel(DeformableDetrPreTrainedModel): def __init__(self, config: DeformableDetrConfig): super().__init__(config) # Create backbone + positional encoding backbone = DeformableDetrConvEncoder(config) position_embeddings = build_position_encoding(config) self.backbone = DeformableDetrConvModel(backbone, position_embeddings) # Create input projection layers if config.num_feature_levels > 1: num_backbone_outs = len(backbone.intermediate_channel_sizes) input_proj_list = [] for _ in range(num_backbone_outs): in_channels = backbone.intermediate_channel_sizes[_] input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, config.d_model, kernel_size=1), nn.GroupNorm(32, config.d_model), ) ) for _ in range(config.num_feature_levels - num_backbone_outs): input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, config.d_model, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, config.d_model), ) ) in_channels = config.d_model self.input_proj = nn.ModuleList(input_proj_list) else: self.input_proj = nn.ModuleList( [ nn.Sequential( nn.Conv2d(backbone.intermediate_channel_sizes[-1], config.d_model, kernel_size=1), nn.GroupNorm(32, config.d_model), ) ] ) if not config.two_stage: self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model * 2) self.encoder = DeformableDetrEncoder(config) self.decoder = DeformableDetrDecoder(config) self.level_embed = nn.Parameter(torch.Tensor(config.num_feature_levels, config.d_model)) if config.two_stage: self.enc_output = nn.Linear(config.d_model, config.d_model) self.enc_output_norm = nn.LayerNorm(config.d_model) self.pos_trans = nn.Linear(config.d_model * 2, config.d_model * 2) self.pos_trans_norm = nn.LayerNorm(config.d_model * 2) else: self.reference_points = nn.Linear(config.d_model, 2) self.post_init() def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder def freeze_backbone(self): for name, param in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(False) def unfreeze_backbone(self): for name, param in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(True) def 
get_valid_ratio(self, mask):
        """Get the valid ratio of all feature maps."""

        _, height, width = mask.shape
        valid_height = torch.sum(mask[:, :, 0], 1)
        valid_width = torch.sum(mask[:, 0, :], 1)
        valid_ratio_height = valid_height.float() / height
        valid_ratio_width = valid_width.float() / width
        valid_ratio = torch.stack([valid_ratio_width, valid_ratio_height], -1)
        return valid_ratio

    def get_proposal_pos_embed(self, proposals):
        """Get the position embedding of the proposals."""

        num_pos_feats = 128
        temperature = 10000
        scale = 2 * math.pi

        dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=proposals.device)
        # floor-divide the channel index so that each sine/cosine pair shares the same frequency,
        # matching the original `dim_t // 2` computation
        dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats)
        # batch_size, num_queries, 4
        proposals = proposals.sigmoid() * scale
        # batch_size, num_queries, 4, 128
        pos = proposals[:, :, :, None] / dim_t
        # batch_size, num_queries, 4, 64, 2 -> batch_size, num_queries, 512
        pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), dim=4).flatten(2)
        return pos

    def gen_encoder_output_proposals(self, enc_output, padding_mask, spatial_shapes):
        """Generate the encoder output proposals from encoded enc_output.

        Args:
            enc_output (Tensor[batch_size, sequence_length, hidden_size]): Output of the encoder.
            padding_mask (Tensor[batch_size, sequence_length]): Padding mask for `enc_output`.
            spatial_shapes (Tensor[num_feature_levels, 2]): Spatial shapes of the feature maps.

        Returns:
            `tuple(torch.FloatTensor)`: A tuple of feature map and bbox prediction.
                - object_query (Tensor[batch_size, sequence_length, hidden_size]): Object query features. Later used
                  to directly predict a bounding box (without the need of a decoder).
                - output_proposals (Tensor[batch_size, sequence_length, 4]): Normalized proposals, after an inverse
                  sigmoid.
        """
        batch_size = enc_output.shape[0]
        proposals = []
        _cur = 0
        for level, (height, width) in enumerate(spatial_shapes):
            mask_flatten_ = padding_mask[:, _cur : (_cur + height * width)].view(batch_size, height, width, 1)
            valid_height = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
            valid_width = torch.sum(~mask_flatten_[:, 0, :, 0], 1)

            grid_y, grid_x = meshgrid(
                torch.linspace(0, height - 1, height, dtype=torch.float32, device=enc_output.device),
                torch.linspace(0, width - 1, width, dtype=torch.float32, device=enc_output.device),
                indexing="ij",
            )
            grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)

            scale = torch.cat([valid_width.unsqueeze(-1), valid_height.unsqueeze(-1)], 1).view(batch_size, 1, 1, 2)
            grid = (grid.unsqueeze(0).expand(batch_size, -1, -1, -1) + 0.5) / scale
            width_height = torch.ones_like(grid) * 0.05 * (2.0**level)
            proposal = torch.cat((grid, width_height), -1).view(batch_size, -1, 4)
            proposals.append(proposal)
            _cur += height * width
        output_proposals = torch.cat(proposals, 1)
        output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True)
        output_proposals = torch.log(output_proposals / (1 - output_proposals))  # inverse sigmoid
        output_proposals = output_proposals.masked_fill(padding_mask.unsqueeze(-1), float("inf"))
        output_proposals = output_proposals.masked_fill(~output_proposals_valid, float("inf"))

        # assign each pixel as an object query
        object_query = enc_output
        object_query = object_query.masked_fill(padding_mask.unsqueeze(-1), float(0))
        object_query = object_query.masked_fill(~output_proposals_valid, float(0))
        object_query = self.enc_output_norm(self.enc_output(object_query))
        return object_query, output_proposals

    @add_start_docstrings_to_model_forward(DEFORMABLE_DETR_INPUTS_DOCSTRING)
@replace_return_docstrings(output_type=DeformableDetrModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values, pixel_mask=None, decoder_attention_mask=None, encoder_outputs=None, inputs_embeds=None, decoder_inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Returns: Examples: ```python >>> from transformers import AutoImageProcessor, DeformableDetrModel >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("SenseTime/deformable-detr") >>> model = DeformableDetrModel.from_pretrained("SenseTime/deformable-detr") >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 300, 256] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, num_channels, height, width = pixel_values.shape device = pixel_values.device if pixel_mask is None: pixel_mask = torch.ones(((batch_size, height, width)), dtype=torch.long, device=device) # Extract multi-scale feature maps of same resolution `config.d_model` (cf Figure 4 in paper) # First, sent pixel_values + pixel_mask through Backbone to obtain the features # which is a list of tuples features, position_embeddings_list = self.backbone(pixel_values, pixel_mask) # Then, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default) sources = [] masks = [] for level, (source, mask) in enumerate(features): sources.append(self.input_proj[level](source)) masks.append(mask) if mask is None: raise ValueError("No attention mask was provided") # Lowest resolution feature maps are obtained via 3x3 stride 2 convolutions on the final stage if self.config.num_feature_levels > len(sources): _len_sources = len(sources) for level in range(_len_sources, self.config.num_feature_levels): if level == _len_sources: source = self.input_proj[level](features[-1][0]) else: source = self.input_proj[level](sources[-1]) mask = nn.functional.interpolate(pixel_mask[None].float(), size=source.shape[-2:]).to(torch.bool)[0] pos_l = self.backbone.position_embedding(source, mask).to(source.dtype) sources.append(source) masks.append(mask) position_embeddings_list.append(pos_l) # Create queries query_embeds = None if not self.config.two_stage: query_embeds = self.query_position_embeddings.weight # Prepare encoder inputs (by flattening) source_flatten = [] mask_flatten = [] lvl_pos_embed_flatten = [] spatial_shapes = [] for level, (source, mask, pos_embed) in enumerate(zip(sources, masks, position_embeddings_list)): batch_size, num_channels, height, width = source.shape spatial_shape = (height, width) spatial_shapes.append(spatial_shape) source = source.flatten(2).transpose(1, 2) mask = mask.flatten(1) pos_embed = pos_embed.flatten(2).transpose(1, 2) lvl_pos_embed = pos_embed + self.level_embed[level].view(1, 1, -1) lvl_pos_embed_flatten.append(lvl_pos_embed) source_flatten.append(source) mask_flatten.append(mask) source_flatten = torch.cat(source_flatten, 1) mask_flatten = torch.cat(mask_flatten, 1) lvl_pos_embed_flatten = 
torch.cat(lvl_pos_embed_flatten, 1) spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=source_flatten.device) level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1) valid_ratios = valid_ratios.float() # Fourth, sent source_flatten + mask_flatten + lvl_pos_embed_flatten (backbone + proj layer output) through encoder # Also provide spatial_shapes, level_start_index and valid_ratios if encoder_outputs is None: encoder_outputs = self.encoder( inputs_embeds=source_flatten, attention_mask=mask_flatten, position_embeddings=lvl_pos_embed_flatten, spatial_shapes=spatial_shapes, level_start_index=level_start_index, valid_ratios=valid_ratios, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # Fifth, prepare decoder inputs batch_size, _, num_channels = encoder_outputs[0].shape enc_outputs_class = None enc_outputs_coord_logits = None if self.config.two_stage: object_query_embedding, output_proposals = self.gen_encoder_output_proposals( encoder_outputs[0], ~mask_flatten, spatial_shapes ) # hack implementation for two-stage Deformable DETR # apply a detection head to each pixel (A.4 in paper) # linear projection for bounding box binary classification (i.e. foreground and background) enc_outputs_class = self.decoder.class_embed[-1](object_query_embedding) # 3-layer FFN to predict bounding boxes coordinates (bbox regression branch) delta_bbox = self.decoder.bbox_embed[-1](object_query_embedding) enc_outputs_coord_logits = delta_bbox + output_proposals # only keep top scoring `config.two_stage_num_proposals` proposals topk = self.config.two_stage_num_proposals topk_proposals = torch.topk(enc_outputs_class[..., 0], topk, dim=1)[1] topk_coords_logits = torch.gather( enc_outputs_coord_logits, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4) ) topk_coords_logits = topk_coords_logits.detach() reference_points = topk_coords_logits.sigmoid() init_reference_points = reference_points pos_trans_out = self.pos_trans_norm(self.pos_trans(self.get_proposal_pos_embed(topk_coords_logits))) query_embed, target = torch.split(pos_trans_out, num_channels, dim=2) else: query_embed, target = torch.split(query_embeds, num_channels, dim=1) query_embed = query_embed.unsqueeze(0).expand(batch_size, -1, -1) target = target.unsqueeze(0).expand(batch_size, -1, -1) reference_points = self.reference_points(query_embed).sigmoid() init_reference_points = reference_points decoder_outputs = self.decoder( inputs_embeds=target, position_embeddings=query_embed, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=mask_flatten, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, valid_ratios=valid_ratios, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: enc_outputs = tuple(value for value in [enc_outputs_class, enc_outputs_coord_logits] if value is not None) tuple_outputs = (init_reference_points,) + decoder_outputs + 
encoder_outputs + enc_outputs return tuple_outputs return DeformableDetrModelOutput( init_reference_points=init_reference_points, last_hidden_state=decoder_outputs.last_hidden_state, intermediate_hidden_states=decoder_outputs.intermediate_hidden_states, intermediate_reference_points=decoder_outputs.intermediate_reference_points, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, enc_outputs_class=enc_outputs_class, enc_outputs_coord_logits=enc_outputs_coord_logits, ) @add_start_docstrings( """ Deformable DETR Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on top, for tasks such as COCO detection. """, DEFORMABLE_DETR_START_DOCSTRING, ) class DeformableDetrForObjectDetection(DeformableDetrPreTrainedModel): # When using clones, all layers > 0 will be clones, but layer 0 *is* required _keys_to_ignore_on_load_missing = ["bbox_embed\.[1-9]\d*", "class_embed\.[1-9]\d*"] def __init__(self, config: DeformableDetrConfig): super().__init__(config) # Deformable DETR encoder-decoder model self.model = DeformableDetrModel(config) # Detection heads on top self.class_embed = nn.Linear(config.d_model, config.num_labels) self.bbox_embed = DeformableDetrMLPPredictionHead( input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3 ) prior_prob = 0.01 bias_value = -math.log((1 - prior_prob) / prior_prob) self.class_embed.bias.data = torch.ones(config.num_labels) * bias_value nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0) nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0) # if two-stage, the last class_embed and bbox_embed is for region proposal generation num_pred = (config.decoder_layers + 1) if config.two_stage else config.decoder_layers if config.with_box_refine: self.class_embed = _get_clones(self.class_embed, num_pred) self.bbox_embed = _get_clones(self.bbox_embed, num_pred) nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0) # hack implementation for iterative bounding box refinement self.model.decoder.bbox_embed = self.bbox_embed else: nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0) self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)]) self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)]) self.model.decoder.bbox_embed = None if config.two_stage: # hack implementation for two-stage self.model.decoder.class_embed = self.class_embed for box_embed in self.bbox_embed: nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0) # Initialize weights and apply final processing self.post_init() # taken from https://github.com/facebookresearch/detr/blob/master/models/detr.py @torch.jit.unused def _set_aux_loss(self, outputs_class, outputs_coord): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list. 
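        # predictions of every decoder layer except the last are returned in the same {"logits", "pred_boxes"}
        # format, so the criterion can match and score them exactly like the final-layer predictions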
return [
            {"logits": a, "pred_boxes": b}
            for a, b in zip(outputs_class.transpose(0, 1)[:-1], outputs_coord.transpose(0, 1)[:-1])
        ]

    @add_start_docstrings_to_model_forward(DEFORMABLE_DETR_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=DeformableDetrObjectDetectionOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        pixel_values,
        pixel_mask=None,
        decoder_attention_mask=None,
        encoder_outputs=None,
        inputs_embeds=None,
        decoder_inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (`List[Dict]` of len `(batch_size,)`, *optional*):
            Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the
            following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch
            respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding
            boxes in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the
            image, 4)`.

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, DeformableDetrForObjectDetection
        >>> from PIL import Image
        >>> import requests
        >>> import torch

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = AutoImageProcessor.from_pretrained("SenseTime/deformable-detr")
        >>> model = DeformableDetrForObjectDetection.from_pretrained("SenseTime/deformable-detr")

        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> # convert outputs (bounding boxes and class logits) to COCO API
        >>> target_sizes = torch.tensor([image.size[::-1]])
        >>> results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[
        ...     0
        ... ]

        >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
        ...     box = [round(i, 2) for i in box.tolist()]
        ...     print(
        ...         f"Detected {model.config.id2label[label.item()]} with confidence "
        ...         f"{round(score.item(), 3)} at location {box}"
        ...     )
        Detected cat with confidence 0.8 at location [16.5, 52.84, 318.25, 470.78]
        Detected cat with confidence 0.789 at location [342.19, 24.3, 640.02, 372.25]
        Detected remote with confidence 0.633 at location [40.79, 72.78, 176.76, 117.25]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # First, send images through DETR base model to obtain encoder + decoder outputs
        outputs = self.model(
            pixel_values,
            pixel_mask=pixel_mask,
            decoder_attention_mask=decoder_attention_mask,
            encoder_outputs=encoder_outputs,
            inputs_embeds=inputs_embeds,
            decoder_inputs_embeds=decoder_inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        hidden_states = outputs.intermediate_hidden_states if return_dict else outputs[2]
        init_reference = outputs.init_reference_points if return_dict else outputs[0]
        inter_references = outputs.intermediate_reference_points if return_dict else outputs[3]

        # class logits + predicted bounding boxes
        outputs_classes = []
        outputs_coords = []

        for level in range(hidden_states.shape[1]):
            if level == 0:
                reference = init_reference
            else:
                reference = inter_references[:, level - 1]
            reference = inverse_sigmoid(reference)
            outputs_class = self.class_embed[level](hidden_states[:, level])
            delta_bbox = self.bbox_embed[level](hidden_states[:, level])
            if reference.shape[-1] == 4:
                outputs_coord_logits = delta_bbox + reference
            elif reference.shape[-1] == 2:
                delta_bbox[..., :2] += reference
                outputs_coord_logits = delta_bbox
            else:
                raise ValueError(f"reference.shape[-1] should be 4 or 2, but got {reference.shape[-1]}")
            outputs_coord = outputs_coord_logits.sigmoid()
            outputs_classes.append(outputs_class)
            outputs_coords.append(outputs_coord)
        # Keep batch_size as first dimension
        outputs_class = torch.stack(outputs_classes, dim=1)
        outputs_coord = torch.stack(outputs_coords, dim=1)

        logits = outputs_class[:, -1]
        pred_boxes = outputs_coord[:, -1]

        loss, loss_dict, auxiliary_outputs = None, None, None
        if labels is not None:
            # First: create the matcher
            matcher = DeformableDetrHungarianMatcher(
                class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost
            )
            # Second: create the criterion
            losses = ["labels", "boxes", "cardinality"]
            criterion = DeformableDetrLoss(
                matcher=matcher,
                num_classes=self.config.num_labels,
                focal_alpha=self.config.focal_alpha,
                losses=losses,
            )
            criterion.to(self.device)
            # Third: compute the losses, based on outputs and labels
            outputs_loss = {}
            outputs_loss["logits"] = logits
            outputs_loss["pred_boxes"] = pred_boxes
            if self.config.auxiliary_loss:
                auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord)
                outputs_loss["auxiliary_outputs"] = auxiliary_outputs
            if self.config.two_stage:
                enc_outputs_coord = outputs.enc_outputs_coord_logits.sigmoid()
                # the criterion reads from `outputs_loss` and expects the classification scores under "logits"
                outputs_loss["enc_outputs"] = {"logits": outputs.enc_outputs_class, "pred_boxes": enc_outputs_coord}

            loss_dict = criterion(outputs_loss, labels)
            # Fourth: compute total loss, as a weighted sum of the various losses
            weight_dict = {"loss_ce": 1, "loss_bbox": self.config.bbox_loss_coefficient}
            weight_dict["loss_giou"] = self.config.giou_loss_coefficient
            if self.config.auxiliary_loss:
                aux_weight_dict = {}
                for i in range(self.config.decoder_layers - 1):
                    aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
                weight_dict.update(aux_weight_dict)
            loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)

        if not return_dict:
            if auxiliary_outputs is not None:
                output = 
(logits, pred_boxes) + auxiliary_outputs + outputs else: output = (logits, pred_boxes) + outputs tuple_outputs = ((loss, loss_dict) + output) if loss is not None else output return tuple_outputs dict_outputs = DeformableDetrObjectDetectionOutput( loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, auxiliary_outputs=auxiliary_outputs, last_hidden_state=outputs.last_hidden_state, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, intermediate_hidden_states=outputs.intermediate_hidden_states, intermediate_reference_points=outputs.intermediate_reference_points, init_reference_points=outputs.init_reference_points, enc_outputs_class=outputs.enc_outputs_class, enc_outputs_coord_logits=outputs.enc_outputs_coord_logits, ) return dict_outputs # Copied from transformers.models.detr.modeling_detr.dice_loss def dice_loss(inputs, targets, num_boxes): """ Compute the DICE loss, similar to generalized IOU for masks Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). """ inputs = inputs.sigmoid() inputs = inputs.flatten(1) numerator = 2 * (inputs * targets).sum(1) denominator = inputs.sum(-1) + targets.sum(-1) loss = 1 - (numerator + 1) / (denominator + 1) return loss.sum() / num_boxes # Copied from transformers.models.detr.modeling_detr.sigmoid_focal_loss def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2): """ Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. Args: inputs (`torch.FloatTensor` of arbitrary shape): The predictions for each example. targets (`torch.FloatTensor` with the same shape as `inputs`) A tensor storing the binary classification label for each element in the `inputs` (0 for the negative class and 1 for the positive class). alpha (`float`, *optional*, defaults to `0.25`): Optional weighting factor in the range (0,1) to balance positive vs. negative examples. gamma (`int`, *optional*, defaults to `2`): Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. Returns: Loss tensor """ prob = inputs.sigmoid() ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction="none") # add modulating factor p_t = prob * targets + (1 - prob) * (1 - targets) loss = ce_loss * ((1 - p_t) ** gamma) if alpha >= 0: alpha_t = alpha * targets + (1 - alpha) * (1 - targets) loss = alpha_t * loss return loss.mean(1).sum() / num_boxes class DeformableDetrLoss(nn.Module): """ This class computes the losses for `DeformableDetrForObjectDetection`. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervise class and box). Args: matcher (`DeformableDetrHungarianMatcher`): Module able to compute a matching between targets and proposals. num_classes (`int`): Number of object categories, omitting the special no-object category. focal_alpha (`float`): Alpha parameter in focal loss. losses (`List[str]`): List of all the losses to be applied. See `get_loss` for a list of all available losses. 
""" def __init__(self, matcher, num_classes, focal_alpha, losses): super().__init__() self.matcher = matcher self.num_classes = num_classes self.focal_alpha = focal_alpha self.losses = losses # removed logging parameter, which was part of the original implementation def loss_labels(self, outputs, targets, indices, num_boxes): """ Classification loss (Binary focal loss) targets dicts must contain the key "class_labels" containing a tensor of dim [nb_target_boxes] """ if "logits" not in outputs: raise KeyError("No logits were found in the outputs") source_logits = outputs["logits"] idx = self._get_source_permutation_idx(indices) target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full( source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device ) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros( [source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1], dtype=source_logits.dtype, layout=source_logits.layout, device=source_logits.device, ) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:, :, :-1] loss_ce = ( sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * source_logits.shape[1] ) losses = {"loss_ce": loss_ce} return losses @torch.no_grad() # Copied from transformers.models.detr.modeling_detr.DetrLoss.loss_cardinality def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes. This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients. """ logits = outputs["logits"] device = logits.device target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1) card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float()) losses = {"cardinality_error": card_err} return losses # Copied from transformers.models.detr.modeling_detr.DetrLoss.loss_boxes def loss_boxes(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss. Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. 
""" if "pred_boxes" not in outputs: raise KeyError("No predicted boxes found in outputs") idx = self._get_source_permutation_idx(indices) source_boxes = outputs["pred_boxes"][idx] target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction="none") losses = {} losses["loss_bbox"] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag( generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes)) ) losses["loss_giou"] = loss_giou.sum() / num_boxes return losses # Copied from transformers.models.detr.modeling_detr.DetrLoss._get_source_permutation_idx def _get_source_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)]) source_idx = torch.cat([source for (source, _) in indices]) return batch_idx, source_idx # Copied from transformers.models.detr.modeling_detr.DetrLoss._get_target_permutation_idx def _get_target_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)]) target_idx = torch.cat([target for (_, target) in indices]) return batch_idx, target_idx def get_loss(self, loss, outputs, targets, indices, num_boxes): loss_map = { "labels": self.loss_labels, "cardinality": self.loss_cardinality, "boxes": self.loss_boxes, } if loss not in loss_map: raise ValueError(f"Loss {loss} not supported") return loss_map[loss](outputs, targets, indices, num_boxes) def forward(self, outputs, targets): """ This performs the loss computation. Args: outputs (`dict`, *optional*): Dictionary of tensors, see the output specification of the model for the format. targets (`List[dict]`, *optional*): List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the losses applied, see each loss' doc. """ outputs_without_aux = {k: v for k, v in outputs.items() if k != "auxiliary_outputs"} # Retrieve the matching between the outputs of the last layer and the targets indices = self.matcher(outputs_without_aux, targets) # Compute the average number of target boxes accross all nodes, for normalization purposes num_boxes = sum(len(t["class_labels"]) for t in targets) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) # (Niels): comment out function below, distributed training to be added # if is_dist_avail_and_initialized(): # torch.distributed.all_reduce(num_boxes) # (Niels) in original implementation, num_boxes is divided by get_world_size() num_boxes = torch.clamp(num_boxes, min=1).item() # Compute all the requested losses losses = {} for loss in self.losses: losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. 
if "auxiliary_outputs" in outputs: for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]): indices = self.matcher(auxiliary_outputs, targets) for loss in self.losses: l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes) l_dict = {k + f"_{i}": v for k, v in l_dict.items()} losses.update(l_dict) if "enc_outputs" in outputs: enc_outputs = outputs["enc_outputs"] bin_targets = copy.deepcopy(targets) for bt in bin_targets: bt["labels"] = torch.zeros_like(bt["labels"]) indices = self.matcher(enc_outputs, bin_targets) for loss in self.losses: kwargs = {} if loss == "labels": # Logging is enabled only for the last layer kwargs["log"] = False l_dict = self.get_loss(loss, enc_outputs, bin_targets, indices, num_boxes, **kwargs) l_dict = {k + "_enc": v for k, v in l_dict.items()} losses.update(l_dict) return losses # Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead class DeformableDetrMLPPredictionHead(nn.Module): """ Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates, height and width of a bounding box w.r.t. an image. Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py """ def __init__(self, input_dim, hidden_dim, output_dim, num_layers): super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) def forward(self, x): for i, layer in enumerate(self.layers): x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x class DeformableDetrHungarianMatcher(nn.Module): """ This class computes an assignment between the targets and the predictions of the network. For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are un-matched (and thus treated as non-objects). Args: class_cost: The relative weight of the classification error in the matching cost. bbox_cost: The relative weight of the L1 error of the bounding box coordinates in the matching cost. giou_cost: The relative weight of the giou loss of the bounding box in the matching cost. """ def __init__(self, class_cost: float = 1, bbox_cost: float = 1, giou_cost: float = 1): super().__init__() requires_backends(self, ["scipy"]) self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost if class_cost == 0 and bbox_cost == 0 and giou_cost == 0: raise ValueError("All costs of the Matcher can't be 0") @torch.no_grad() def forward(self, outputs, targets): """ Args: outputs (`dict`): A dictionary that contains at least these entries: * "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits * "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates. targets (`List[dict]`): A list of targets (len(targets) = batch_size), where each target is a dict containing: * "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels * "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates. 
Returns: `List[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where: - index_i is the indices of the selected predictions (in order) - index_j is the indices of the corresponding selected targets (in order) For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes) """ batch_size, num_queries = outputs["logits"].shape[:2] # We flatten to compute the cost matrices in a batch out_prob = outputs["logits"].flatten(0, 1).sigmoid() # [batch_size * num_queries, num_classes] out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] # Also concat the target labels and boxes target_ids = torch.cat([v["class_labels"] for v in targets]) target_bbox = torch.cat([v["boxes"] for v in targets]) # Compute the classification cost. alpha = 0.25 gamma = 2.0 neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log()) pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log()) class_cost = pos_cost_class[:, target_ids] - neg_cost_class[:, target_ids] # Compute the L1 cost between boxes bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) # Compute the giou cost between boxes giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) # Final cost matrix cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu() sizes = [len(v["boxes"]) for v in targets] indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))] return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] # Copied from transformers.models.detr.modeling_detr._upcast def _upcast(t: Tensor) -> Tensor: # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type if t.is_floating_point(): return t if t.dtype in (torch.float32, torch.float64) else t.float() else: return t if t.dtype in (torch.int32, torch.int64) else t.int() # Copied from transformers.models.detr.modeling_detr.box_area def box_area(boxes: Tensor) -> Tensor: """ Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates. Args: boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`): Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1 < x2` and `0 <= y1 < y2`. Returns: `torch.FloatTensor`: a tensor containing the area for each box. """ boxes = _upcast(boxes) return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) # Copied from transformers.models.detr.modeling_detr.box_iou def box_iou(boxes1, boxes2): area1 = box_area(boxes1) area2 = box_area(boxes2) left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] width_height = (right_bottom - left_top).clamp(min=0) # [N,M,2] inter = width_height[:, :, 0] * width_height[:, :, 1] # [N,M] union = area1[:, None] + area2 - inter iou = inter / union return iou, union # Copied from transformers.models.detr.modeling_detr.generalized_box_iou def generalized_box_iou(boxes1, boxes2): """ Generalized IoU from https://giou.stanford.edu/. The boxes should be in [x0, y0, x1, y1] (corner) format. 
Returns: `torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2) """ # degenerate boxes gives inf / nan results # so do an early check if not (boxes1[:, 2:] >= boxes1[:, :2]).all(): raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}") if not (boxes2[:, 2:] >= boxes2[:, :2]).all(): raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}") iou, union = box_iou(boxes1, boxes2) top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2]) bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) width_height = (bottom_right - top_left).clamp(min=0) # [N,M,2] area = width_height[:, :, 0] * width_height[:, :, 1] return iou - (area - union) / area # Copied from transformers.models.detr.modeling_detr._max_by_axis def _max_by_axis(the_list): # type: (List[List[int]]) -> List[int] maxes = the_list[0] for sublist in the_list[1:]: for index, item in enumerate(sublist): maxes[index] = max(maxes[index], item) return maxes # Copied from transformers.models.detr.modeling_detr.NestedTensor class NestedTensor(object): def __init__(self, tensors, mask: Optional[Tensor]): self.tensors = tensors self.mask = mask def to(self, device): cast_tensor = self.tensors.to(device) mask = self.mask if mask is not None: cast_mask = mask.to(device) else: cast_mask = None return NestedTensor(cast_tensor, cast_mask) def decompose(self): return self.tensors, self.mask def __repr__(self): return str(self.tensors) # Copied from transformers.models.detr.modeling_detr.nested_tensor_from_tensor_list def nested_tensor_from_tensor_list(tensor_list: List[Tensor]): if tensor_list[0].ndim == 3: max_size = _max_by_axis([list(img.shape) for img in tensor_list]) batch_shape = [len(tensor_list)] + max_size batch_size, num_channels, height, width = batch_shape dtype = tensor_list[0].dtype device = tensor_list[0].device tensor = torch.zeros(batch_shape, dtype=dtype, device=device) mask = torch.ones((batch_size, height, width), dtype=torch.bool, device=device) for img, pad_img, m in zip(tensor_list, tensor, mask): pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) m[: img.shape[1], : img.shape[2]] = False else: raise ValueError("Only 3-dimensional tensors are supported") return NestedTensor(tensor, mask)
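# --- Illustrative sketch (not part of the original file): how the box utilities above behave
# --- on made-up boxes, and what nested_tensor_from_tensor_list returns. All values below are
# --- hypothetical and only meant to clarify the helpers' contracts.
import torch

# Two boxes in [x0, y0, x1, y1] (corner) format, as generalized_box_iou expects.
boxes1 = torch.tensor([[0.0, 0.0, 2.0, 2.0]])
boxes2 = torch.tensor([[1.0, 1.0, 3.0, 3.0]])

iou, union = box_iou(boxes1, boxes2)        # intersection 1, union 7 -> iou = 1/7
giou = generalized_box_iou(boxes1, boxes2)  # 1/7 - (9 - 7)/9, i.e. IoU minus the enclosing-box penalty

# nested_tensor_from_tensor_list pads a list of (C, H, W) images to a common shape and
# returns a NestedTensor whose mask is True on padded pixels.
images = [torch.rand(3, 4, 5), torch.rand(3, 6, 3)]
padded, mask = nested_tensor_from_tensor_list(images).decompose()  # padded: (2, 3, 6, 5)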
1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/deta/image_processing_deta.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Deformable DETR.""" import pathlib import warnings from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...feature_extraction_utils import BatchFeature from ...image_processing_utils import BaseImageProcessor, get_size_dict from ...image_transforms import ( PaddingMode, center_to_corners_format, corners_to_center_format, normalize, pad, rescale, resize, rgb_to_id, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_batched, to_numpy_array, valid_coco_detection_annotations, valid_coco_panoptic_annotations, valid_images, ) from ...utils import ( is_flax_available, is_jax_tensor, is_tf_available, is_tf_tensor, is_torch_available, is_torch_tensor, is_torchvision_available, is_vision_available, ) from ...utils.generic import ExplicitEnum, TensorType if is_torch_available(): import torch if is_torchvision_available(): from torchvision.ops.boxes import batched_nms if is_vision_available(): import PIL class AnnotionFormat(ExplicitEnum): COCO_DETECTION = "coco_detection" COCO_PANOPTIC = "coco_panoptic" SUPPORTED_ANNOTATION_FORMATS = (AnnotionFormat.COCO_DETECTION, AnnotionFormat.COCO_PANOPTIC) # Copied from transformers.models.detr.image_processing_detr.get_size_with_aspect_ratio def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]: """ Computes the output image size given the input image size and the desired output size. Args: image_size (`Tuple[int, int]`): The input image size. size (`int`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. """ height, width = image_size if max_size is not None: min_original_size = float(min((height, width))) max_original_size = float(max((height, width))) if max_original_size / min_original_size * size > max_size: size = int(round(max_size * min_original_size / max_original_size)) if (height <= width and height == size) or (width <= height and width == size): return height, width if width < height: ow = size oh = int(size * height / width) else: oh = size ow = int(size * width / height) return (oh, ow) # Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size def get_resize_output_image_size( input_image: np.ndarray, size: Union[int, Tuple[int, int], List[int]], max_size: Optional[int] = None ) -> Tuple[int, int]: """ Computes the output image size given the input image size and the desired output size. If the desired output size is a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output image size is computed by keeping the aspect ratio of the input image size. Args: image_size (`Tuple[int, int]`): The input image size. size (`int`): The desired output size. 
max_size (`int`, *optional*): The maximum allowed output size. """ image_size = get_image_size(input_image) if isinstance(size, (list, tuple)): return size return get_size_with_aspect_ratio(image_size, size, max_size) # Copied from transformers.models.detr.image_processing_detr.get_numpy_to_framework_fn def get_numpy_to_framework_fn(arr) -> Callable: """ Returns a function that converts a numpy array to the framework of the input array. Args: arr (`np.ndarray`): The array to convert. """ if isinstance(arr, np.ndarray): return np.array if is_tf_available() and is_tf_tensor(arr): import tensorflow as tf return tf.convert_to_tensor if is_torch_available() and is_torch_tensor(arr): import torch return torch.tensor if is_flax_available() and is_jax_tensor(arr): import jax.numpy as jnp return jnp.array raise ValueError(f"Cannot convert arrays of type {type(arr)}") # Copied from transformers.models.detr.image_processing_detr.safe_squeeze def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray: """ Squeezes an array, but only if the axis specified has dim 1. """ if axis is None: return arr.squeeze() try: return arr.squeeze(axis=axis) except ValueError: return arr # Copied from transformers.models.detr.image_processing_detr.normalize_annotation def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict: image_height, image_width = image_size norm_annotation = {} for key, value in annotation.items(): if key == "boxes": boxes = value boxes = corners_to_center_format(boxes) boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32) norm_annotation[key] = boxes else: norm_annotation[key] = value return norm_annotation # Copied from transformers.models.detr.image_processing_detr.max_across_indices def max_across_indices(values: Iterable[Any]) -> List[Any]: """ Return the maximum value across all indices of an iterable of values. """ return [max(values_i) for values_i in zip(*values)] # Copied from transformers.models.detr.image_processing_detr.get_max_height_width def get_max_height_width(images: List[np.ndarray]) -> List[int]: """ Get the maximum height and width across all images in a batch. """ input_channel_dimension = infer_channel_dimension_format(images[0]) if input_channel_dimension == ChannelDimension.FIRST: _, max_height, max_width = max_across_indices([img.shape for img in images]) elif input_channel_dimension == ChannelDimension.LAST: max_height, max_width, _ = max_across_indices([img.shape for img in images]) else: raise ValueError(f"Invalid channel dimension format: {input_channel_dimension}") return (max_height, max_width) # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int]) -> np.ndarray: """ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. Args: image (`np.ndarray`): Image to make the pixel mask for. output_size (`Tuple[int, int]`): Output size of the mask. """ input_height, input_width = get_image_size(image) mask = np.zeros(output_size, dtype=np.int64) mask[:input_height, :input_width] = 1 return mask # Copied from transformers.models.detr.image_processing_detr.convert_coco_poly_to_mask def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray: """ Convert a COCO polygon annotation to a mask. Args: segmentations (`List[List[float]]`): List of polygons, each polygon represented by a list of x-y coordinates. height (`int`): Height of the mask. 
width (`int`): Width of the mask. """ try: from pycocotools import mask as coco_mask except ImportError: raise ImportError("Pycocotools is not installed in your environment.") masks = [] for polygons in segmentations: rles = coco_mask.frPyObjects(polygons, height, width) mask = coco_mask.decode(rles) if len(mask.shape) < 3: mask = mask[..., None] mask = np.asarray(mask, dtype=np.uint8) mask = np.any(mask, axis=2) masks.append(mask) if masks: masks = np.stack(masks, axis=0) else: masks = np.zeros((0, height, width), dtype=np.uint8) return masks # Copied from transformers.models.detr.image_processing_detr.prepare_coco_detection_annotation with DETR->DETA def prepare_coco_detection_annotation(image, target, return_segmentation_masks: bool = False): """ Convert the target in COCO format into the format expected by DETA. """ image_height, image_width = get_image_size(image) image_id = target["image_id"] image_id = np.asarray([image_id], dtype=np.int64) # Get all COCO annotations for the given image. annotations = target["annotations"] annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0] classes = [obj["category_id"] for obj in annotations] classes = np.asarray(classes, dtype=np.int64) # for conversion to coco api area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32) iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64) boxes = [obj["bbox"] for obj in annotations] # guard against no boxes via resizing boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4) boxes[:, 2:] += boxes[:, :2] boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width) boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height) keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) new_target = {} new_target["image_id"] = image_id new_target["class_labels"] = classes[keep] new_target["boxes"] = boxes[keep] new_target["area"] = area[keep] new_target["iscrowd"] = iscrowd[keep] new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64) if annotations and "keypoints" in annotations[0]: keypoints = [obj["keypoints"] for obj in annotations] keypoints = np.asarray(keypoints, dtype=np.float32) num_keypoints = keypoints.shape[0] keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints new_target["keypoints"] = keypoints[keep] if return_segmentation_masks: segmentation_masks = [obj["segmentation"] for obj in annotations] masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width) new_target["masks"] = masks[keep] return new_target # Copied from transformers.models.detr.image_processing_detr.masks_to_boxes def masks_to_boxes(masks: np.ndarray) -> np.ndarray: """ Compute the bounding boxes around the provided panoptic segmentation masks. 
Args: masks: masks in format `[number_masks, height, width]` where N is the number of masks Returns: boxes: bounding boxes in format `[number_masks, 4]` in xyxy format """ if masks.size == 0: return np.zeros((0, 4)) h, w = masks.shape[-2:] y = np.arange(0, h, dtype=np.float32) x = np.arange(0, w, dtype=np.float32) # see https://github.com/pytorch/pytorch/issues/50276 y, x = np.meshgrid(y, x, indexing="ij") x_mask = masks * np.expand_dims(x, axis=0) x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) x_min = x.filled(fill_value=1e8) x_min = x_min.reshape(x_min.shape[0], -1).min(-1) y_mask = masks * np.expand_dims(y, axis=0) y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) y_min = y.filled(fill_value=1e8) y_min = y_min.reshape(y_min.shape[0], -1).min(-1) return np.stack([x_min, y_min, x_max, y_max], 1) # Copied from transformers.models.detr.image_processing_detr.prepare_coco_panoptic_annotation with DETR->DETA def prepare_coco_panoptic_annotation( image: np.ndarray, target: Dict, masks_path: Union[str, pathlib.Path], return_masks: bool = True ) -> Dict: """ Prepare a coco panoptic annotation for DETA. """ image_height, image_width = get_image_size(image) annotation_path = pathlib.Path(masks_path) / target["file_name"] new_target = {} new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64) new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64) new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64) if "segments_info" in target: masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32) masks = rgb_to_id(masks) ids = np.array([segment_info["id"] for segment_info in target["segments_info"]]) masks = masks == ids[:, None, None] masks = masks.astype(np.uint8) if return_masks: new_target["masks"] = masks new_target["boxes"] = masks_to_boxes(masks) new_target["class_labels"] = np.array( [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64 ) new_target["iscrowd"] = np.asarray( [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64 ) new_target["area"] = np.asarray( [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32 ) return new_target # Copied from transformers.models.detr.image_processing_detr.resize_annotation def resize_annotation( annotation: Dict[str, Any], orig_size: Tuple[int, int], target_size: Tuple[int, int], threshold: float = 0.5, resample: PILImageResampling = PILImageResampling.NEAREST, ): """ Resizes an annotation to a target size. Args: annotation (`Dict[str, Any]`): The annotation dictionary. orig_size (`Tuple[int, int]`): The original size of the input image. target_size (`Tuple[int, int]`): The target size of the image, as returned by the preprocessing `resize` step. threshold (`float`, *optional*, defaults to 0.5): The threshold used to binarize the segmentation masks. resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`): The resampling filter to use when resizing the masks. 
""" ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size)) ratio_height, ratio_width = ratios new_annotation = {} new_annotation["size"] = target_size for key, value in annotation.items(): if key == "boxes": boxes = value scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32) new_annotation["boxes"] = scaled_boxes elif key == "area": area = value scaled_area = area * (ratio_width * ratio_height) new_annotation["area"] = scaled_area elif key == "masks": masks = value[:, None] masks = np.array([resize(mask, target_size, resample=resample) for mask in masks]) masks = masks.astype(np.float32) masks = masks[:, 0] > threshold new_annotation["masks"] = masks elif key == "size": new_annotation["size"] = target_size else: new_annotation[key] = value return new_annotation class DetaImageProcessor(BaseImageProcessor): r""" Constructs a Deformable DETR image processor. Args: format (`str`, *optional*, defaults to `"coco_detection"`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". do_resize (`bool`, *optional*, defaults to `True`): Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`): Size of the image's (height, width) dimensions after resizing. Can be overridden by the `size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use if resizing the image. do_rescale (`bool`, *optional*, defaults to `True`): Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize: Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): Mean values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method. do_pad (`bool`, *optional*, defaults to `True`): Controls whether to pad the image to the largest image in a batch and create a pixel mask. Can be overridden by the `do_pad` parameter in the `preprocess` method. 
""" model_input_names = ["pixel_values", "pixel_mask"] def __init__( self, format: Union[str, AnnotionFormat] = AnnotionFormat.COCO_DETECTION, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Union[float, List[float]] = None, image_std: Union[float, List[float]] = None, do_pad: bool = True, **kwargs, ) -> None: if "pad_and_return_pixel_mask" in kwargs: do_pad = kwargs.pop("pad_and_return_pixel_mask") size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333} size = get_size_dict(size, default_to_square=False) super().__init__(**kwargs) self.format = format self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.do_pad = do_pad # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation with DETR->DETA def prepare_annotation( self, image: np.ndarray, target: Dict, format: Optional[AnnotionFormat] = None, return_segmentation_masks: bool = None, masks_path: Optional[Union[str, pathlib.Path]] = None, ) -> Dict: """ Prepare an annotation for feeding into DETA model. """ format = format if format is not None else self.format if format == AnnotionFormat.COCO_DETECTION: return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_detection_annotation(image, target, return_segmentation_masks) elif format == AnnotionFormat.COCO_PANOPTIC: return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_panoptic_annotation( image, target, masks_path=masks_path, return_masks=return_segmentation_masks ) else: raise ValueError(f"Format {format} is not supported.") return target # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare def prepare(self, image, target, return_segmentation_masks=None, masks_path=None): warnings.warn( "The `prepare` method is deprecated and will be removed in a future version. " "Please use `prepare_annotation` instead. Note: the `prepare_annotation` method " "does not return the image anymore.", ) target = self.prepare_annotation(image, target, return_segmentation_masks, masks_path, self.format) return image, target # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.convert_coco_poly_to_mask def convert_coco_poly_to_mask(self, *args, **kwargs): warnings.warn("The `convert_coco_poly_to_mask` method is deprecated and will be removed in a future version. ") return convert_coco_poly_to_mask(*args, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_detection def prepare_coco_detection(self, *args, **kwargs): warnings.warn("The `prepare_coco_detection` method is deprecated and will be removed in a future version. ") return prepare_coco_detection_annotation(*args, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_panoptic def prepare_coco_panoptic(self, *args, **kwargs): warnings.warn("The `prepare_coco_panoptic` method is deprecated and will be removed in a future version. 
") return prepare_coco_panoptic_annotation(*args, **kwargs) def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension] = None, **kwargs, ) -> np.ndarray: """ Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an int, smaller edge of the image will be matched to this number. """ size = get_size_dict(size, default_to_square=False) if "shortest_edge" in size and "longest_edge" in size: size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: size = (size["height"], size["width"]) else: raise ValueError( "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got" f" {size.keys()}." ) image = resize(image, size=size, resample=resample, data_format=data_format) return image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation def resize_annotation( self, annotation, orig_size, size, resample: PILImageResampling = PILImageResampling.NEAREST, ) -> Dict: """ Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched to this number. """ return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale def rescale( self, image: np.ndarray, rescale_factor: Union[float, int], data_format: Optional[ChannelDimension] = None ) -> np.ndarray: """ Rescale the image by the given factor. """ return rescale(image, rescale_factor, data_format=data_format) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize def normalize( self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Normalize the image with the given mean and standard deviation. """ return normalize(image, mean=mean, std=std, data_format=data_format) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict: """ Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to `[center_x, center_y, width, height]` format. """ return normalize_annotation(annotation, image_size=image_size) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad_and_create_pixel_mask def pad_and_create_pixel_mask( self, pixel_values_list: List[ImageInput], return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, ) -> BatchFeature: """ Pads a batch of images with zeros to the size of largest height and width in the batch and returns their corresponding pixel mask. Args: images (`List[np.ndarray]`): Batch of images to pad. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. 
data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ warnings.warn( "This method is deprecated and will be removed in v4.27.0. Please use pad instead.", FutureWarning ) # pad expects a list of np.ndarray, but the previous feature extractors expected torch tensors images = [to_numpy_array(image) for image in pixel_values_list] return self.pad( images=images, return_pixel_mask=True, return_tensors=return_tensors, data_format=data_format, ) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image def _pad_image( self, image: np.ndarray, output_size: Tuple[int, int], constant_values: Union[float, Iterable[float]] = 0, data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Pad an image with zeros to the given size. """ input_height, input_width = get_image_size(image) output_height, output_width = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad( image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format ) return padded_image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad def pad( self, images: List[np.ndarray], constant_values: Union[float, Iterable[float]] = 0, return_pixel_mask: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width in the batch and optionally returns their corresponding pixel mask. Args: image (`np.ndarray`): Image to pad. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether to return a pixel mask. input_channel_dimension (`ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be inferred from the input image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. 
""" pad_size = get_max_height_width(images) padded_images = [ self._pad_image(image, pad_size, constant_values=constant_values, data_format=data_format) for image in images ] data = {"pixel_values": padded_images} if return_pixel_mask: masks = [make_pixel_mask(image=image, output_size=pad_size) for image in images] data["pixel_mask"] = masks return BatchFeature(data=data, tensor_type=return_tensors) def preprocess( self, images: ImageInput, annotations: Optional[Union[List[Dict], List[List[Dict]]]] = None, return_segmentation_masks: bool = None, masks_path: Optional[Union[str, pathlib.Path]] = None, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample=None, # PILImageResampling do_rescale: Optional[bool] = None, rescale_factor: Optional[Union[int, float]] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: Optional[bool] = None, format: Optional[Union[str, AnnotionFormat]] = None, return_tensors: Optional[Union[TensorType, str]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs, ) -> BatchFeature: """ Preprocess an image or a batch of images so that it can be used by the model. Args: images (`ImageInput`): Image or batch of images to preprocess. annotations (`List[Dict]` or `List[List[Dict]]`, *optional*): List of annotations associated with the image or batch of images. If annotionation is for object detection, the annotations should be a dictionary with the following keys: - "image_id" (`int`): The image id. - "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a dictionary. An image can have no annotations, in which case the list should be empty. If annotionation is for segmentation, the annotations should be a dictionary with the following keys: - "image_id" (`int`): The image id. - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary. An image can have no segments, in which case the list should be empty. - "file_name" (`str`): The file name of the image. return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks): Whether to return segmentation masks. masks_path (`str` or `pathlib.Path`, *optional*): Path to the directory containing the segmentation masks. do_resize (`bool`, *optional*, defaults to self.do_resize): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to self.size): Size of the image after resizing. resample (`PILImageResampling`, *optional*, defaults to self.resample): Resampling filter to use when resizing the image. do_rescale (`bool`, *optional*, defaults to self.do_rescale): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to self.rescale_factor): Rescale factor to use when rescaling the image. do_normalize (`bool`, *optional*, defaults to self.do_normalize): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean): Mean to use when normalizing the image. image_std (`float` or `List[float]`, *optional*, defaults to self.image_std): Standard deviation to use when normalizing the image. do_pad (`bool`, *optional*, defaults to self.do_pad): Whether to pad the image. format (`str` or `AnnotionFormat`, *optional*, defaults to self.format): Format of the annotations. return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors): Type of tensors to return. 
If `None`, will return the list of images. data_format (`str` or `ChannelDimension`, *optional*, defaults to self.data_format): The channel dimension format of the image. If not provided, it will be the same as the input image. """ if "pad_and_return_pixel_mask" in kwargs: warnings.warn( "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, " "use `do_pad` instead.", FutureWarning, ) do_pad = kwargs.pop("pad_and_return_pixel_mask") do_resize = self.do_resize if do_resize is None else do_resize size = self.size if size is None else size size = get_size_dict(size=size, default_to_square=False) resample = self.resample if resample is None else resample do_rescale = self.do_rescale if do_rescale is None else do_rescale rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor do_normalize = self.do_normalize if do_normalize is None else do_normalize image_mean = self.image_mean if image_mean is None else image_mean image_std = self.image_std if image_std is None else image_std do_pad = self.do_pad if do_pad is None else do_pad format = self.format if format is None else format if do_resize is not None and size is None: raise ValueError("Size and max_size must be specified if do_resize is True.") if do_rescale is not None and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize is not None and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") if not is_batched(images): images = [images] annotations = [annotations] if annotations is not None else None if annotations is not None and len(images) != len(annotations): raise ValueError( f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match." ) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) format = AnnotionFormat(format) if annotations is not None: if format == AnnotionFormat.COCO_DETECTION and not valid_coco_detection_annotations(annotations): raise ValueError( "Invalid COCO detection annotations. Annotations must a dict (single image) of list of dicts" "(batch of images) with the following keys: `image_id` and `annotations`, with the latter " "being a list of annotations in the COCO format." ) elif format == AnnotionFormat.COCO_PANOPTIC and not valid_coco_panoptic_annotations(annotations): raise ValueError( "Invalid COCO panoptic annotations. Annotations must a dict (single image) of list of dicts " "(batch of images) with the following keys: `image_id`, `file_name` and `segments_info`, with " "the latter being a list of annotations in the COCO format." ) elif format not in SUPPORTED_ANNOTATION_FORMATS: raise ValueError( f"Unsupported annotation format: {format} must be one of {SUPPORTED_ANNOTATION_FORMATS}" ) if ( masks_path is not None and format == AnnotionFormat.COCO_PANOPTIC and not isinstance(masks_path, (pathlib.Path, str)) ): raise ValueError( "The path to the directory containing the mask PNG files should be provided as a" f" `pathlib.Path` or string object, but is {type(masks_path)} instead." 
) # All transformations expect numpy arrays images = [to_numpy_array(image) for image in images] # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image) if annotations is not None: prepared_images = [] prepared_annotations = [] for image, target in zip(images, annotations): target = self.prepare_annotation( image, target, format, return_segmentation_masks=return_segmentation_masks, masks_path=masks_path ) prepared_images.append(image) prepared_annotations.append(target) images = prepared_images annotations = prepared_annotations del prepared_images, prepared_annotations # transformations if do_resize: if annotations is not None: resized_images, resized_annotations = [], [] for image, target in zip(images, annotations): orig_size = get_image_size(image) resized_image = self.resize(image, size=size, resample=resample) resized_annotation = self.resize_annotation(target, orig_size, get_image_size(resized_image)) resized_images.append(resized_image) resized_annotations.append(resized_annotation) images = resized_images annotations = resized_annotations del resized_images, resized_annotations else: images = [self.resize(image, size=size, resample=resample) for image in images] if do_rescale: images = [self.rescale(image, rescale_factor) for image in images] if do_normalize: images = [self.normalize(image, image_mean, image_std) for image in images] if annotations is not None: annotations = [ self.normalize_annotation(annotation, get_image_size(image)) for annotation, image in zip(annotations, images) ] if do_pad: # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...} data = self.pad(images, return_pixel_mask=True, data_format=data_format) else: images = [to_channel_dimension_format(image, data_format) for image in images] data = {"pixel_values": images} encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) if annotations is not None: encoded_inputs["labels"] = [ BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations ] return encoded_inputs def post_process_object_detection( self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None, nms_threshold: float = 0.7, ): """ Converts the output of [`DetaForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch. Args: outputs ([`DetrObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.5): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. nms_threshold (`float`, *optional*, defaults to 0.7): NMS threshold. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. 
""" out_logits, out_bbox = outputs.logits, outputs.pred_boxes batch_size, num_queries, num_labels = out_logits.shape if target_sizes is not None: if len(out_logits) != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) prob = out_logits.sigmoid() all_scores = prob.view(batch_size, num_queries * num_labels).to(out_logits.device) all_indexes = torch.arange(num_queries * num_labels)[None].repeat(batch_size, 1).to(out_logits.device) all_boxes = all_indexes // out_logits.shape[2] all_labels = all_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, all_boxes.unsqueeze(-1).repeat(1, 1, 4)) # and from relative [0, 1] to absolute [0, height] coordinates if target_sizes is not None: if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] for b in range(batch_size): box = boxes[b] score = all_scores[b] lbls = all_labels[b] pre_topk = score.topk(min(10000, len(score))).indices box = box[pre_topk] score = score[pre_topk] lbls = lbls[pre_topk] # apply NMS keep_inds = batched_nms(box, score, lbls, nms_threshold)[:100] score = score[keep_inds] lbls = lbls[keep_inds] box = box[keep_inds] results.append( { "scores": score[score > threshold], "labels": lbls[score > threshold], "boxes": box[score > threshold], } ) return results
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Deformable DETR.""" import pathlib import warnings from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...feature_extraction_utils import BatchFeature from ...image_processing_utils import BaseImageProcessor, get_size_dict from ...image_transforms import ( PaddingMode, center_to_corners_format, corners_to_center_format, normalize, pad, rescale, resize, rgb_to_id, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_batched, to_numpy_array, valid_coco_detection_annotations, valid_coco_panoptic_annotations, valid_images, ) from ...utils import ( is_flax_available, is_jax_tensor, is_tf_available, is_tf_tensor, is_torch_available, is_torch_tensor, is_torchvision_available, is_vision_available, ) from ...utils.generic import ExplicitEnum, TensorType if is_torch_available(): import torch from ...pytorch_utils import torch_int_div if is_torchvision_available(): from torchvision.ops.boxes import batched_nms if is_vision_available(): import PIL class AnnotionFormat(ExplicitEnum): COCO_DETECTION = "coco_detection" COCO_PANOPTIC = "coco_panoptic" SUPPORTED_ANNOTATION_FORMATS = (AnnotionFormat.COCO_DETECTION, AnnotionFormat.COCO_PANOPTIC) # Copied from transformers.models.detr.image_processing_detr.get_size_with_aspect_ratio def get_size_with_aspect_ratio(image_size, size, max_size=None) -> Tuple[int, int]: """ Computes the output image size given the input image size and the desired output size. Args: image_size (`Tuple[int, int]`): The input image size. size (`int`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. """ height, width = image_size if max_size is not None: min_original_size = float(min((height, width))) max_original_size = float(max((height, width))) if max_original_size / min_original_size * size > max_size: size = int(round(max_size * min_original_size / max_original_size)) if (height <= width and height == size) or (width <= height and width == size): return height, width if width < height: ow = size oh = int(size * height / width) else: oh = size ow = int(size * width / height) return (oh, ow) # Copied from transformers.models.detr.image_processing_detr.get_resize_output_image_size def get_resize_output_image_size( input_image: np.ndarray, size: Union[int, Tuple[int, int], List[int]], max_size: Optional[int] = None ) -> Tuple[int, int]: """ Computes the output image size given the input image size and the desired output size. If the desired output size is a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output image size is computed by keeping the aspect ratio of the input image size. Args: image_size (`Tuple[int, int]`): The input image size. 
size (`int`): The desired output size. max_size (`int`, *optional*): The maximum allowed output size. """ image_size = get_image_size(input_image) if isinstance(size, (list, tuple)): return size return get_size_with_aspect_ratio(image_size, size, max_size) # Copied from transformers.models.detr.image_processing_detr.get_numpy_to_framework_fn def get_numpy_to_framework_fn(arr) -> Callable: """ Returns a function that converts a numpy array to the framework of the input array. Args: arr (`np.ndarray`): The array to convert. """ if isinstance(arr, np.ndarray): return np.array if is_tf_available() and is_tf_tensor(arr): import tensorflow as tf return tf.convert_to_tensor if is_torch_available() and is_torch_tensor(arr): import torch return torch.tensor if is_flax_available() and is_jax_tensor(arr): import jax.numpy as jnp return jnp.array raise ValueError(f"Cannot convert arrays of type {type(arr)}") # Copied from transformers.models.detr.image_processing_detr.safe_squeeze def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray: """ Squeezes an array, but only if the axis specified has dim 1. """ if axis is None: return arr.squeeze() try: return arr.squeeze(axis=axis) except ValueError: return arr # Copied from transformers.models.detr.image_processing_detr.normalize_annotation def normalize_annotation(annotation: Dict, image_size: Tuple[int, int]) -> Dict: image_height, image_width = image_size norm_annotation = {} for key, value in annotation.items(): if key == "boxes": boxes = value boxes = corners_to_center_format(boxes) boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32) norm_annotation[key] = boxes else: norm_annotation[key] = value return norm_annotation # Copied from transformers.models.detr.image_processing_detr.max_across_indices def max_across_indices(values: Iterable[Any]) -> List[Any]: """ Return the maximum value across all indices of an iterable of values. """ return [max(values_i) for values_i in zip(*values)] # Copied from transformers.models.detr.image_processing_detr.get_max_height_width def get_max_height_width(images: List[np.ndarray]) -> List[int]: """ Get the maximum height and width across all images in a batch. """ input_channel_dimension = infer_channel_dimension_format(images[0]) if input_channel_dimension == ChannelDimension.FIRST: _, max_height, max_width = max_across_indices([img.shape for img in images]) elif input_channel_dimension == ChannelDimension.LAST: max_height, max_width, _ = max_across_indices([img.shape for img in images]) else: raise ValueError(f"Invalid channel dimension format: {input_channel_dimension}") return (max_height, max_width) # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int]) -> np.ndarray: """ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. Args: image (`np.ndarray`): Image to make the pixel mask for. output_size (`Tuple[int, int]`): Output size of the mask. """ input_height, input_width = get_image_size(image) mask = np.zeros(output_size, dtype=np.int64) mask[:input_height, :input_width] = 1 return mask # Copied from transformers.models.detr.image_processing_detr.convert_coco_poly_to_mask def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray: """ Convert a COCO polygon annotation to a mask. Args: segmentations (`List[List[float]]`): List of polygons, each polygon represented by a list of x-y coordinates. 
height (`int`): Height of the mask. width (`int`): Width of the mask. """ try: from pycocotools import mask as coco_mask except ImportError: raise ImportError("Pycocotools is not installed in your environment.") masks = [] for polygons in segmentations: rles = coco_mask.frPyObjects(polygons, height, width) mask = coco_mask.decode(rles) if len(mask.shape) < 3: mask = mask[..., None] mask = np.asarray(mask, dtype=np.uint8) mask = np.any(mask, axis=2) masks.append(mask) if masks: masks = np.stack(masks, axis=0) else: masks = np.zeros((0, height, width), dtype=np.uint8) return masks # Copied from transformers.models.detr.image_processing_detr.prepare_coco_detection_annotation with DETR->DETA def prepare_coco_detection_annotation(image, target, return_segmentation_masks: bool = False): """ Convert the target in COCO format into the format expected by DETA. """ image_height, image_width = get_image_size(image) image_id = target["image_id"] image_id = np.asarray([image_id], dtype=np.int64) # Get all COCO annotations for the given image. annotations = target["annotations"] annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0] classes = [obj["category_id"] for obj in annotations] classes = np.asarray(classes, dtype=np.int64) # for conversion to coco api area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32) iscrowd = np.asarray([obj["iscrowd"] if "iscrowd" in obj else 0 for obj in annotations], dtype=np.int64) boxes = [obj["bbox"] for obj in annotations] # guard against no boxes via resizing boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4) boxes[:, 2:] += boxes[:, :2] boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width) boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height) keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) new_target = {} new_target["image_id"] = image_id new_target["class_labels"] = classes[keep] new_target["boxes"] = boxes[keep] new_target["area"] = area[keep] new_target["iscrowd"] = iscrowd[keep] new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64) if annotations and "keypoints" in annotations[0]: keypoints = [obj["keypoints"] for obj in annotations] keypoints = np.asarray(keypoints, dtype=np.float32) num_keypoints = keypoints.shape[0] keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints new_target["keypoints"] = keypoints[keep] if return_segmentation_masks: segmentation_masks = [obj["segmentation"] for obj in annotations] masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width) new_target["masks"] = masks[keep] return new_target # Copied from transformers.models.detr.image_processing_detr.masks_to_boxes def masks_to_boxes(masks: np.ndarray) -> np.ndarray: """ Compute the bounding boxes around the provided panoptic segmentation masks. 
Args: masks: masks in format `[number_masks, height, width]` where N is the number of masks Returns: boxes: bounding boxes in format `[number_masks, 4]` in xyxy format """ if masks.size == 0: return np.zeros((0, 4)) h, w = masks.shape[-2:] y = np.arange(0, h, dtype=np.float32) x = np.arange(0, w, dtype=np.float32) # see https://github.com/pytorch/pytorch/issues/50276 y, x = np.meshgrid(y, x, indexing="ij") x_mask = masks * np.expand_dims(x, axis=0) x_max = x_mask.reshape(x_mask.shape[0], -1).max(-1) x = np.ma.array(x_mask, mask=~(np.array(masks, dtype=bool))) x_min = x.filled(fill_value=1e8) x_min = x_min.reshape(x_min.shape[0], -1).min(-1) y_mask = masks * np.expand_dims(y, axis=0) y_max = y_mask.reshape(x_mask.shape[0], -1).max(-1) y = np.ma.array(y_mask, mask=~(np.array(masks, dtype=bool))) y_min = y.filled(fill_value=1e8) y_min = y_min.reshape(y_min.shape[0], -1).min(-1) return np.stack([x_min, y_min, x_max, y_max], 1) # Copied from transformers.models.detr.image_processing_detr.prepare_coco_panoptic_annotation with DETR->DETA def prepare_coco_panoptic_annotation( image: np.ndarray, target: Dict, masks_path: Union[str, pathlib.Path], return_masks: bool = True ) -> Dict: """ Prepare a coco panoptic annotation for DETA. """ image_height, image_width = get_image_size(image) annotation_path = pathlib.Path(masks_path) / target["file_name"] new_target = {} new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64) new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64) new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64) if "segments_info" in target: masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32) masks = rgb_to_id(masks) ids = np.array([segment_info["id"] for segment_info in target["segments_info"]]) masks = masks == ids[:, None, None] masks = masks.astype(np.uint8) if return_masks: new_target["masks"] = masks new_target["boxes"] = masks_to_boxes(masks) new_target["class_labels"] = np.array( [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64 ) new_target["iscrowd"] = np.asarray( [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64 ) new_target["area"] = np.asarray( [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32 ) return new_target # Copied from transformers.models.detr.image_processing_detr.resize_annotation def resize_annotation( annotation: Dict[str, Any], orig_size: Tuple[int, int], target_size: Tuple[int, int], threshold: float = 0.5, resample: PILImageResampling = PILImageResampling.NEAREST, ): """ Resizes an annotation to a target size. Args: annotation (`Dict[str, Any]`): The annotation dictionary. orig_size (`Tuple[int, int]`): The original size of the input image. target_size (`Tuple[int, int]`): The target size of the image, as returned by the preprocessing `resize` step. threshold (`float`, *optional*, defaults to 0.5): The threshold used to binarize the segmentation masks. resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`): The resampling filter to use when resizing the masks. 
""" ratios = tuple(float(s) / float(s_orig) for s, s_orig in zip(target_size, orig_size)) ratio_height, ratio_width = ratios new_annotation = {} new_annotation["size"] = target_size for key, value in annotation.items(): if key == "boxes": boxes = value scaled_boxes = boxes * np.asarray([ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32) new_annotation["boxes"] = scaled_boxes elif key == "area": area = value scaled_area = area * (ratio_width * ratio_height) new_annotation["area"] = scaled_area elif key == "masks": masks = value[:, None] masks = np.array([resize(mask, target_size, resample=resample) for mask in masks]) masks = masks.astype(np.float32) masks = masks[:, 0] > threshold new_annotation["masks"] = masks elif key == "size": new_annotation["size"] = target_size else: new_annotation[key] = value return new_annotation class DetaImageProcessor(BaseImageProcessor): r""" Constructs a Deformable DETR image processor. Args: format (`str`, *optional*, defaults to `"coco_detection"`): Data format of the annotations. One of "coco_detection" or "coco_panoptic". do_resize (`bool`, *optional*, defaults to `True`): Controls whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the `do_resize` parameter in the `preprocess` method. size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 800, "longest_edge": 1333}`): Size of the image's (height, width) dimensions after resizing. Can be overridden by the `size` parameter in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BILINEAR`): Resampling filter to use if resizing the image. do_rescale (`bool`, *optional*, defaults to `True`): Controls whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize: Controls whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`): Mean values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`): Standard deviation values to use when normalizing the image. Can be a single value or a list of values, one for each channel. Can be overridden by the `image_std` parameter in the `preprocess` method. do_pad (`bool`, *optional*, defaults to `True`): Controls whether to pad the image to the largest image in a batch and create a pixel mask. Can be overridden by the `do_pad` parameter in the `preprocess` method. 
""" model_input_names = ["pixel_values", "pixel_mask"] def __init__( self, format: Union[str, AnnotionFormat] = AnnotionFormat.COCO_DETECTION, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Union[float, List[float]] = None, image_std: Union[float, List[float]] = None, do_pad: bool = True, **kwargs, ) -> None: if "pad_and_return_pixel_mask" in kwargs: do_pad = kwargs.pop("pad_and_return_pixel_mask") size = size if size is not None else {"shortest_edge": 800, "longest_edge": 1333} size = get_size_dict(size, default_to_square=False) super().__init__(**kwargs) self.format = format self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.do_pad = do_pad # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_annotation with DETR->DETA def prepare_annotation( self, image: np.ndarray, target: Dict, format: Optional[AnnotionFormat] = None, return_segmentation_masks: bool = None, masks_path: Optional[Union[str, pathlib.Path]] = None, ) -> Dict: """ Prepare an annotation for feeding into DETA model. """ format = format if format is not None else self.format if format == AnnotionFormat.COCO_DETECTION: return_segmentation_masks = False if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_detection_annotation(image, target, return_segmentation_masks) elif format == AnnotionFormat.COCO_PANOPTIC: return_segmentation_masks = True if return_segmentation_masks is None else return_segmentation_masks target = prepare_coco_panoptic_annotation( image, target, masks_path=masks_path, return_masks=return_segmentation_masks ) else: raise ValueError(f"Format {format} is not supported.") return target # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare def prepare(self, image, target, return_segmentation_masks=None, masks_path=None): warnings.warn( "The `prepare` method is deprecated and will be removed in a future version. " "Please use `prepare_annotation` instead. Note: the `prepare_annotation` method " "does not return the image anymore.", ) target = self.prepare_annotation(image, target, return_segmentation_masks, masks_path, self.format) return image, target # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.convert_coco_poly_to_mask def convert_coco_poly_to_mask(self, *args, **kwargs): warnings.warn("The `convert_coco_poly_to_mask` method is deprecated and will be removed in a future version. ") return convert_coco_poly_to_mask(*args, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_detection def prepare_coco_detection(self, *args, **kwargs): warnings.warn("The `prepare_coco_detection` method is deprecated and will be removed in a future version. ") return prepare_coco_detection_annotation(*args, **kwargs) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.prepare_coco_panoptic def prepare_coco_panoptic(self, *args, **kwargs): warnings.warn("The `prepare_coco_panoptic` method is deprecated and will be removed in a future version. 
") return prepare_coco_panoptic_annotation(*args, **kwargs) def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format: Optional[ChannelDimension] = None, **kwargs, ) -> np.ndarray: """ Resize the image to the given size. Size can be `min_size` (scalar) or `(height, width)` tuple. If size is an int, smaller edge of the image will be matched to this number. """ size = get_size_dict(size, default_to_square=False) if "shortest_edge" in size and "longest_edge" in size: size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"]) elif "height" in size and "width" in size: size = (size["height"], size["width"]) else: raise ValueError( "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got" f" {size.keys()}." ) image = resize(image, size=size, resample=resample, data_format=data_format) return image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.resize_annotation def resize_annotation( self, annotation, orig_size, size, resample: PILImageResampling = PILImageResampling.NEAREST, ) -> Dict: """ Resize the annotation to match the resized image. If size is an int, smaller edge of the mask will be matched to this number. """ return resize_annotation(annotation, orig_size=orig_size, target_size=size, resample=resample) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.rescale def rescale( self, image: np.ndarray, rescale_factor: Union[float, int], data_format: Optional[ChannelDimension] = None ) -> np.ndarray: """ Rescale the image by the given factor. """ return rescale(image, rescale_factor, data_format=data_format) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize def normalize( self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Normalize the image with the given mean and standard deviation. """ return normalize(image, mean=mean, std=std, data_format=data_format) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize_annotation def normalize_annotation(self, annotation: Dict, image_size: Tuple[int, int]) -> Dict: """ Normalize the boxes in the annotation from `[top_left_x, top_left_y, bottom_right_x, bottom_right_y]` to `[center_x, center_y, width, height]` format. """ return normalize_annotation(annotation, image_size=image_size) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad_and_create_pixel_mask def pad_and_create_pixel_mask( self, pixel_values_list: List[ImageInput], return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, ) -> BatchFeature: """ Pads a batch of images with zeros to the size of largest height and width in the batch and returns their corresponding pixel mask. Args: images (`List[np.ndarray]`): Batch of images to pad. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. 
data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ warnings.warn( "This method is deprecated and will be removed in v4.27.0. Please use pad instead.", FutureWarning ) # pad expects a list of np.ndarray, but the previous feature extractors expected torch tensors images = [to_numpy_array(image) for image in pixel_values_list] return self.pad( images=images, return_pixel_mask=True, return_tensors=return_tensors, data_format=data_format, ) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image def _pad_image( self, image: np.ndarray, output_size: Tuple[int, int], constant_values: Union[float, Iterable[float]] = 0, data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Pad an image with zeros to the given size. """ input_height, input_width = get_image_size(image) output_height, output_width = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad( image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format ) return padded_image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad def pad( self, images: List[np.ndarray], constant_values: Union[float, Iterable[float]] = 0, return_pixel_mask: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width in the batch and optionally returns their corresponding pixel mask. Args: image (`np.ndarray`): Image to pad. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether to return a pixel mask. input_channel_dimension (`ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be inferred from the input image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. 
""" pad_size = get_max_height_width(images) padded_images = [ self._pad_image(image, pad_size, constant_values=constant_values, data_format=data_format) for image in images ] data = {"pixel_values": padded_images} if return_pixel_mask: masks = [make_pixel_mask(image=image, output_size=pad_size) for image in images] data["pixel_mask"] = masks return BatchFeature(data=data, tensor_type=return_tensors) def preprocess( self, images: ImageInput, annotations: Optional[Union[List[Dict], List[List[Dict]]]] = None, return_segmentation_masks: bool = None, masks_path: Optional[Union[str, pathlib.Path]] = None, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample=None, # PILImageResampling do_rescale: Optional[bool] = None, rescale_factor: Optional[Union[int, float]] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_pad: Optional[bool] = None, format: Optional[Union[str, AnnotionFormat]] = None, return_tensors: Optional[Union[TensorType, str]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs, ) -> BatchFeature: """ Preprocess an image or a batch of images so that it can be used by the model. Args: images (`ImageInput`): Image or batch of images to preprocess. annotations (`List[Dict]` or `List[List[Dict]]`, *optional*): List of annotations associated with the image or batch of images. If annotionation is for object detection, the annotations should be a dictionary with the following keys: - "image_id" (`int`): The image id. - "annotations" (`List[Dict]`): List of annotations for an image. Each annotation should be a dictionary. An image can have no annotations, in which case the list should be empty. If annotionation is for segmentation, the annotations should be a dictionary with the following keys: - "image_id" (`int`): The image id. - "segments_info" (`List[Dict]`): List of segments for an image. Each segment should be a dictionary. An image can have no segments, in which case the list should be empty. - "file_name" (`str`): The file name of the image. return_segmentation_masks (`bool`, *optional*, defaults to self.return_segmentation_masks): Whether to return segmentation masks. masks_path (`str` or `pathlib.Path`, *optional*): Path to the directory containing the segmentation masks. do_resize (`bool`, *optional*, defaults to self.do_resize): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to self.size): Size of the image after resizing. resample (`PILImageResampling`, *optional*, defaults to self.resample): Resampling filter to use when resizing the image. do_rescale (`bool`, *optional*, defaults to self.do_rescale): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to self.rescale_factor): Rescale factor to use when rescaling the image. do_normalize (`bool`, *optional*, defaults to self.do_normalize): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to self.image_mean): Mean to use when normalizing the image. image_std (`float` or `List[float]`, *optional*, defaults to self.image_std): Standard deviation to use when normalizing the image. do_pad (`bool`, *optional*, defaults to self.do_pad): Whether to pad the image. format (`str` or `AnnotionFormat`, *optional*, defaults to self.format): Format of the annotations. return_tensors (`str` or `TensorType`, *optional*, defaults to self.return_tensors): Type of tensors to return. 
If `None`, will return the list of images. data_format (`str` or `ChannelDimension`, *optional*, defaults to self.data_format): The channel dimension format of the image. If not provided, it will be the same as the input image. """ if "pad_and_return_pixel_mask" in kwargs: warnings.warn( "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version, " "use `do_pad` instead.", FutureWarning, ) do_pad = kwargs.pop("pad_and_return_pixel_mask") do_resize = self.do_resize if do_resize is None else do_resize size = self.size if size is None else size size = get_size_dict(size=size, default_to_square=False) resample = self.resample if resample is None else resample do_rescale = self.do_rescale if do_rescale is None else do_rescale rescale_factor = self.rescale_factor if rescale_factor is None else rescale_factor do_normalize = self.do_normalize if do_normalize is None else do_normalize image_mean = self.image_mean if image_mean is None else image_mean image_std = self.image_std if image_std is None else image_std do_pad = self.do_pad if do_pad is None else do_pad format = self.format if format is None else format if do_resize is not None and size is None: raise ValueError("Size and max_size must be specified if do_resize is True.") if do_rescale is not None and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize is not None and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") if not is_batched(images): images = [images] annotations = [annotations] if annotations is not None else None if annotations is not None and len(images) != len(annotations): raise ValueError( f"The number of images ({len(images)}) and annotations ({len(annotations)}) do not match." ) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) format = AnnotionFormat(format) if annotations is not None: if format == AnnotionFormat.COCO_DETECTION and not valid_coco_detection_annotations(annotations): raise ValueError( "Invalid COCO detection annotations. Annotations must a dict (single image) of list of dicts" "(batch of images) with the following keys: `image_id` and `annotations`, with the latter " "being a list of annotations in the COCO format." ) elif format == AnnotionFormat.COCO_PANOPTIC and not valid_coco_panoptic_annotations(annotations): raise ValueError( "Invalid COCO panoptic annotations. Annotations must a dict (single image) of list of dicts " "(batch of images) with the following keys: `image_id`, `file_name` and `segments_info`, with " "the latter being a list of annotations in the COCO format." ) elif format not in SUPPORTED_ANNOTATION_FORMATS: raise ValueError( f"Unsupported annotation format: {format} must be one of {SUPPORTED_ANNOTATION_FORMATS}" ) if ( masks_path is not None and format == AnnotionFormat.COCO_PANOPTIC and not isinstance(masks_path, (pathlib.Path, str)) ): raise ValueError( "The path to the directory containing the mask PNG files should be provided as a" f" `pathlib.Path` or string object, but is {type(masks_path)} instead." 
) # All transformations expect numpy arrays images = [to_numpy_array(image) for image in images] # prepare (COCO annotations as a list of Dict -> DETR target as a single Dict per image) if annotations is not None: prepared_images = [] prepared_annotations = [] for image, target in zip(images, annotations): target = self.prepare_annotation( image, target, format, return_segmentation_masks=return_segmentation_masks, masks_path=masks_path ) prepared_images.append(image) prepared_annotations.append(target) images = prepared_images annotations = prepared_annotations del prepared_images, prepared_annotations # transformations if do_resize: if annotations is not None: resized_images, resized_annotations = [], [] for image, target in zip(images, annotations): orig_size = get_image_size(image) resized_image = self.resize(image, size=size, resample=resample) resized_annotation = self.resize_annotation(target, orig_size, get_image_size(resized_image)) resized_images.append(resized_image) resized_annotations.append(resized_annotation) images = resized_images annotations = resized_annotations del resized_images, resized_annotations else: images = [self.resize(image, size=size, resample=resample) for image in images] if do_rescale: images = [self.rescale(image, rescale_factor) for image in images] if do_normalize: images = [self.normalize(image, image_mean, image_std) for image in images] if annotations is not None: annotations = [ self.normalize_annotation(annotation, get_image_size(image)) for annotation, image in zip(annotations, images) ] if do_pad: # Pads images and returns their mask: {'pixel_values': ..., 'pixel_mask': ...} data = self.pad(images, return_pixel_mask=True, data_format=data_format) else: images = [to_channel_dimension_format(image, data_format) for image in images] data = {"pixel_values": images} encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors) if annotations is not None: encoded_inputs["labels"] = [ BatchFeature(annotation, tensor_type=return_tensors) for annotation in annotations ] return encoded_inputs def post_process_object_detection( self, outputs, threshold: float = 0.5, target_sizes: Union[TensorType, List[Tuple]] = None, nms_threshold: float = 0.7, ): """ Converts the output of [`DetaForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format. Only supports PyTorch. Args: outputs ([`DetrObjectDetectionOutput`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.5): Score threshold to keep object detection predictions. target_sizes (`torch.Tensor` or `List[Tuple[int, int]]`, *optional*): Tensor of shape `(batch_size, 2)` or list of tuples (`Tuple[int, int]`) containing the target size (height, width) of each image in the batch. If left to None, predictions will not be resized. nms_threshold (`float`, *optional*, defaults to 0.7): NMS threshold. Returns: `List[Dict]`: A list of dictionaries, each dictionary containing the scores, labels and boxes for an image in the batch as predicted by the model. 
""" out_logits, out_bbox = outputs.logits, outputs.pred_boxes batch_size, num_queries, num_labels = out_logits.shape if target_sizes is not None: if len(out_logits) != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) prob = out_logits.sigmoid() all_scores = prob.view(batch_size, num_queries * num_labels).to(out_logits.device) all_indexes = torch.arange(num_queries * num_labels)[None].repeat(batch_size, 1).to(out_logits.device) all_boxes = torch_int_div(all_indexes, out_logits.shape[2]) all_labels = all_indexes % out_logits.shape[2] boxes = center_to_corners_format(out_bbox) boxes = torch.gather(boxes, 1, all_boxes.unsqueeze(-1).repeat(1, 1, 4)) # and from relative [0, 1] to absolute [0, height] coordinates if target_sizes is not None: if isinstance(target_sizes, List): img_h = torch.Tensor([i[0] for i in target_sizes]) img_w = torch.Tensor([i[1] for i in target_sizes]) else: img_h, img_w = target_sizes.unbind(1) scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1).to(boxes.device) boxes = boxes * scale_fct[:, None, :] results = [] for b in range(batch_size): box = boxes[b] score = all_scores[b] lbls = all_labels[b] pre_topk = score.topk(min(10000, len(score))).indices box = box[pre_topk] score = score[pre_topk] lbls = lbls[pre_topk] # apply NMS keep_inds = batched_nms(box, score, lbls, nms_threshold)[:100] score = score[keep_inds] lbls = lbls[keep_inds] box = box[keep_inds] results.append( { "scores": score[score > threshold], "labels": lbls[score > threshold], "boxes": box[score > threshold], } ) return results
1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing a torch tensor with `//` is deprecated and has to be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/deta/modeling_deta.py
# coding=utf-8 # Copyright 2022 SenseTime and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch DETA model.""" import copy import math import warnings from dataclasses import dataclass from typing import Dict, List, Optional, Tuple import torch import torch.nn.functional as F from torch import Tensor, nn from ...activations import ACT2FN from ...file_utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, is_scipy_available, is_vision_available, replace_return_docstrings, ) from ...modeling_outputs import BaseModelOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import meshgrid from ...utils import is_torchvision_available, logging, requires_backends from ..auto import AutoBackbone from .configuration_deta import DetaConfig logger = logging.get_logger(__name__) if is_vision_available(): from transformers.image_transforms import center_to_corners_format if is_torchvision_available(): from torchvision.ops.boxes import batched_nms if is_scipy_available(): from scipy.optimize import linear_sum_assignment logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "DetaConfig" _CHECKPOINT_FOR_DOC = "jozhang97/deta-swin-large-o365" DETA_PRETRAINED_MODEL_ARCHIVE_LIST = [ "jozhang97/deta-swin-large-o365", # See all DETA models at https://huggingface.co/models?filter=deta ] @dataclass # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrDecoderOutput with DeformableDetr->Deta class DetaDecoderOutput(ModelOutput): """ Base class for outputs of the DetaDecoder. This class adds two attributes to BaseModelOutputWithCrossAttentions, namely: - a stacked tensor of intermediate decoder hidden states (i.e. the output of each decoder layer) - a stacked tensor of intermediate reference points. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`): Stacked intermediate hidden states (output of each layer of the decoder). intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, sequence_length, hidden_size)`): Stacked intermediate reference points (reference points of each layer of the decoder). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. """ last_hidden_state: torch.FloatTensor = None intermediate_hidden_states: torch.FloatTensor = None intermediate_reference_points: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrModelOutput with DeformableDetr->Deta,Deformable DETR->DETA class DetaModelOutput(ModelOutput): """ Base class for outputs of the Deformable DETR encoder-decoder model. Args: init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Initial reference points sent through the Transformer decoder. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`): Stacked intermediate hidden states (output of each layer of the decoder). intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`): Stacked intermediate reference points (reference points of each layer of the decoder). decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, num_queries, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, num_queries, num_queries)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. 
encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are picked as region proposals in the first stage. Output of bounding box binary classification (i.e. foreground and background). enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): Logits of predicted bounding boxes coordinates in the first stage. """ init_reference_points: torch.FloatTensor = None last_hidden_state: torch.FloatTensor = None intermediate_hidden_states: torch.FloatTensor = None intermediate_reference_points: torch.FloatTensor = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None enc_outputs_class: Optional[torch.FloatTensor] = None enc_outputs_coord_logits: Optional[torch.FloatTensor] = None @dataclass # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrObjectDetectionOutput with DeformableDetr->Deta class DetaObjectDetectionOutput(ModelOutput): """ Output type of [`DetaForObjectDetection`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)): Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss. loss_dict (`Dict`, *optional*): A dictionary containing the individual losses. Useful for logging. logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): Classification logits (including no-object) for all queries. pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). 
You can use [`~DetaProcessor.post_process_object_detection`] to retrieve the unnormalized bounding boxes. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxilary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and `pred_boxes`) for each decoder layer. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the decoder of the model. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, num_queries, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, num_queries, num_queries)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_heads, 4, 4)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`): Stacked intermediate hidden states (output of each layer of the decoder). intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`): Stacked intermediate reference points (reference points of each layer of the decoder). init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Initial reference points sent through the Transformer decoder. 
enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are picked as region proposals in the first stage. Output of bounding box binary classification (i.e. foreground and background). enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): Logits of predicted bounding boxes coordinates in the first stage. """ loss: Optional[torch.FloatTensor] = None loss_dict: Optional[Dict] = None logits: torch.FloatTensor = None pred_boxes: torch.FloatTensor = None auxiliary_outputs: Optional[List[Dict]] = None init_reference_points: Optional[torch.FloatTensor] = None last_hidden_state: Optional[torch.FloatTensor] = None intermediate_hidden_states: Optional[torch.FloatTensor] = None intermediate_reference_points: Optional[torch.FloatTensor] = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None enc_outputs_class: Optional = None enc_outputs_coord_logits: Optional = None def _get_clones(module, N): return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) def inverse_sigmoid(x, eps=1e-5): x = x.clamp(min=0, max=1) x1 = x.clamp(min=eps) x2 = (1 - x).clamp(min=eps) return torch.log(x1 / x2) # Copied from transformers.models.detr.modeling_detr.DetrFrozenBatchNorm2d with Detr->Deta class DetaFrozenBatchNorm2d(nn.Module): """ BatchNorm2d where the batch statistics and the affine parameters are fixed. Copy-paste from torchvision.misc.ops with added eps before rqsrt, without which any other models than torchvision.models.resnet[18,34,50,101] produce nans. 
""" def __init__(self, n): super().__init__() self.register_buffer("weight", torch.ones(n)) self.register_buffer("bias", torch.zeros(n)) self.register_buffer("running_mean", torch.zeros(n)) self.register_buffer("running_var", torch.ones(n)) def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ): num_batches_tracked_key = prefix + "num_batches_tracked" if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] super()._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ) def forward(self, x): # move reshapes to the beginning # to make it user-friendly weight = self.weight.reshape(1, -1, 1, 1) bias = self.bias.reshape(1, -1, 1, 1) running_var = self.running_var.reshape(1, -1, 1, 1) running_mean = self.running_mean.reshape(1, -1, 1, 1) epsilon = 1e-5 scale = weight * (running_var + epsilon).rsqrt() bias = bias - running_mean * scale return x * scale + bias # Copied from transformers.models.detr.modeling_detr.replace_batch_norm with Detr->Deta def replace_batch_norm(m, name=""): for attr_str in dir(m): target_attr = getattr(m, attr_str) if isinstance(target_attr, nn.BatchNorm2d): frozen = DetaFrozenBatchNorm2d(target_attr.num_features) bn = getattr(m, attr_str) frozen.weight.data.copy_(bn.weight) frozen.bias.data.copy_(bn.bias) frozen.running_mean.data.copy_(bn.running_mean) frozen.running_var.data.copy_(bn.running_var) setattr(m, attr_str, frozen) for n, ch in m.named_children(): replace_batch_norm(ch, n) class DetaBackboneWithPositionalEncodings(nn.Module): """ Backbone model with positional embeddings. nn.BatchNorm2d layers are replaced by DetaFrozenBatchNorm2d as defined above. """ def __init__(self, config): super().__init__() backbone = AutoBackbone.from_config(config.backbone_config) with torch.no_grad(): replace_batch_norm(backbone) self.model = backbone self.intermediate_channel_sizes = self.model.channels # TODO fix this if config.backbone_config.model_type == "resnet": for name, parameter in self.model.named_parameters(): if "stages.1" not in name and "stages.2" not in name and "stages.3" not in name: parameter.requires_grad_(False) self.position_embedding = build_position_encoding(config) def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): """ Outputs feature maps of latter stages C_3 through C_5 in ResNet if `config.num_feature_levels > 1`, otherwise outputs feature maps of C_5. """ # first, send pixel_values through the backbone to get list of feature maps features = self.model(pixel_values).feature_maps # next, create position embeddings out = [] pos = [] for feature_map in features: # downsample pixel_mask to match shape of corresponding feature_map mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0] position_embeddings = self.position_embedding(feature_map, mask).to(feature_map.dtype) out.append((feature_map, mask)) pos.append(position_embeddings) return out, pos # Copied from transformers.models.detr.modeling_detr._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, target_len: Optional[int] = None): """ Expands attention_mask from `[batch_size, seq_len]` to `[batch_size, 1, target_seq_len, source_seq_len]`. 
""" batch_size, source_len = mask.size() target_len = target_len if target_len is not None else source_len expanded_mask = mask[:, None, None, :].expand(batch_size, 1, target_len, source_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min) # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrSinePositionEmbedding with DeformableDetr->Deta class DetaSinePositionEmbedding(nn.Module): """ This is a more standard version of the position embedding, very similar to the one used by the Attention is all you need paper, generalized to work on images. """ def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None): super().__init__() self.embedding_dim = embedding_dim self.temperature = temperature self.normalize = normalize if scale is not None and normalize is False: raise ValueError("normalize should be True if scale is passed") if scale is None: scale = 2 * math.pi self.scale = scale def forward(self, pixel_values, pixel_mask): if pixel_mask is None: raise ValueError("No pixel mask provided") y_embed = pixel_mask.cumsum(1, dtype=torch.float32) x_embed = pixel_mask.cumsum(2, dtype=torch.float32) if self.normalize: eps = 1e-6 y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale dim_t = torch.arange(self.embedding_dim, dtype=torch.float32, device=pixel_values.device) dim_t = self.temperature ** (2 * (dim_t // 2) / self.embedding_dim) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) return pos # Copied from transformers.models.detr.modeling_detr.DetrLearnedPositionEmbedding class DetaLearnedPositionEmbedding(nn.Module): """ This module learns positional embeddings up to a fixed maximum size. 
""" def __init__(self, embedding_dim=256): super().__init__() self.row_embeddings = nn.Embedding(50, embedding_dim) self.column_embeddings = nn.Embedding(50, embedding_dim) def forward(self, pixel_values, pixel_mask=None): height, width = pixel_values.shape[-2:] width_values = torch.arange(width, device=pixel_values.device) height_values = torch.arange(height, device=pixel_values.device) x_emb = self.column_embeddings(width_values) y_emb = self.row_embeddings(height_values) pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1) pos = pos.permute(2, 0, 1) pos = pos.unsqueeze(0) pos = pos.repeat(pixel_values.shape[0], 1, 1, 1) return pos # Copied from transformers.models.detr.modeling_detr.build_position_encoding with Detr->Deta def build_position_encoding(config): n_steps = config.d_model // 2 if config.position_embedding_type == "sine": # TODO find a better way of exposing other arguments position_embedding = DetaSinePositionEmbedding(n_steps, normalize=True) elif config.position_embedding_type == "learned": position_embedding = DetaLearnedPositionEmbedding(n_steps) else: raise ValueError(f"Not supported {config.position_embedding_type}") return position_embedding # Copied from transformers.models.deformable_detr.modeling_deformable_detr.multi_scale_deformable_attention def multi_scale_deformable_attention( value: Tensor, value_spatial_shapes: Tensor, sampling_locations: Tensor, attention_weights: Tensor ) -> Tensor: batch_size, _, num_heads, hidden_dim = value.shape _, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape value_list = value.split([height * width for height, width in value_spatial_shapes], dim=1) sampling_grids = 2 * sampling_locations - 1 sampling_value_list = [] for level_id, (height, width) in enumerate(value_spatial_shapes): # batch_size, height*width, num_heads, hidden_dim # -> batch_size, height*width, num_heads*hidden_dim # -> batch_size, num_heads*hidden_dim, height*width # -> batch_size*num_heads, hidden_dim, height, width value_l_ = ( value_list[level_id].flatten(2).transpose(1, 2).reshape(batch_size * num_heads, hidden_dim, height, width) ) # batch_size, num_queries, num_heads, num_points, 2 # -> batch_size, num_heads, num_queries, num_points, 2 # -> batch_size*num_heads, num_queries, num_points, 2 sampling_grid_l_ = sampling_grids[:, :, :, level_id].transpose(1, 2).flatten(0, 1) # batch_size*num_heads, hidden_dim, num_queries, num_points sampling_value_l_ = nn.functional.grid_sample( value_l_, sampling_grid_l_, mode="bilinear", padding_mode="zeros", align_corners=False ) sampling_value_list.append(sampling_value_l_) # (batch_size, num_queries, num_heads, num_levels, num_points) # -> (batch_size, num_heads, num_queries, num_levels, num_points) # -> (batch_size, num_heads, 1, num_queries, num_levels*num_points) attention_weights = attention_weights.transpose(1, 2).reshape( batch_size * num_heads, 1, num_queries, num_levels * num_points ) output = ( (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights) .sum(-1) .view(batch_size, num_heads * hidden_dim, num_queries) ) return output.transpose(1, 2).contiguous() class DetaMultiscaleDeformableAttention(nn.Module): """ Multiscale deformable attention as proposed in Deformable DETR. 
""" # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrMultiscaleDeformableAttention.__init__ with DeformableDetr->Deta def __init__(self, embed_dim: int, num_heads: int, n_levels: int, n_points: int): super().__init__() if embed_dim % num_heads != 0: raise ValueError( f"embed_dim (d_model) must be divisible by num_heads, but got {embed_dim} and {num_heads}" ) dim_per_head = embed_dim // num_heads # check if dim_per_head is power of 2 if not ((dim_per_head & (dim_per_head - 1) == 0) and dim_per_head != 0): warnings.warn( "You'd better set embed_dim (d_model) in DetaMultiscaleDeformableAttention to make the" " dimension of each attention head a power of 2 which is more efficient in the authors' CUDA" " implementation." ) self.im2col_step = 64 self.d_model = embed_dim self.n_levels = n_levels self.n_heads = num_heads self.n_points = n_points self.sampling_offsets = nn.Linear(embed_dim, num_heads * n_levels * n_points * 2) self.attention_weights = nn.Linear(embed_dim, num_heads * n_levels * n_points) self.value_proj = nn.Linear(embed_dim, embed_dim) self.output_proj = nn.Linear(embed_dim, embed_dim) self._reset_parameters() def _reset_parameters(self): nn.init.constant_(self.sampling_offsets.weight.data, 0.0) thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads) grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) grid_init = ( (grid_init / grid_init.abs().max(-1, keepdim=True)[0]) .view(self.n_heads, 1, 1, 2) .repeat(1, self.n_levels, self.n_points, 1) ) for i in range(self.n_points): grid_init[:, :, i, :] *= i + 1 with torch.no_grad(): self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1)) nn.init.constant_(self.attention_weights.weight.data, 0.0) nn.init.constant_(self.attention_weights.bias.data, 0.0) nn.init.xavier_uniform_(self.value_proj.weight.data) nn.init.constant_(self.value_proj.bias.data, 0.0) nn.init.xavier_uniform_(self.output_proj.weight.data) nn.init.constant_(self.output_proj.bias.data, 0.0) def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): return tensor if position_embeddings is None else tensor + position_embeddings def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings: Optional[torch.Tensor] = None, reference_points=None, spatial_shapes=None, level_start_index=None, output_attentions: bool = False, ): # add position embeddings to the hidden states before projecting to queries and keys if position_embeddings is not None: hidden_states = self.with_pos_embed(hidden_states, position_embeddings) batch_size, num_queries, _ = hidden_states.shape batch_size, sequence_length, _ = encoder_hidden_states.shape if (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() != sequence_length: raise ValueError( "Make sure to align the spatial shapes with the sequence length of the encoder hidden states" ) value = self.value_proj(encoder_hidden_states) if attention_mask is not None: # we invert the attention_mask value = value.masked_fill(~attention_mask[..., None], float(0)) value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads) sampling_offsets = self.sampling_offsets(hidden_states).view( batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2 ) attention_weights = self.attention_weights(hidden_states).view( batch_size, num_queries, self.n_heads, self.n_levels * self.n_points ) attention_weights = 
F.softmax(attention_weights, -1).view( batch_size, num_queries, self.n_heads, self.n_levels, self.n_points ) # batch_size, num_queries, n_heads, n_levels, n_points, 2 if reference_points.shape[-1] == 2: offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1) sampling_locations = ( reference_points[:, :, None, :, None, :] + sampling_offsets / offset_normalizer[None, None, None, :, None, :] ) elif reference_points.shape[-1] == 4: sampling_locations = ( reference_points[:, :, None, :, None, :2] + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5 ) else: raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}") # PyTorch implementation (for now) output = multi_scale_deformable_attention(value, spatial_shapes, sampling_locations, attention_weights) output = self.output_proj(output) return output, attention_weights # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrMultiheadAttention with DeformableDetr->Deta,Deformable DETR->DETA class DetaMultiheadAttention(nn.Module): """ Multi-headed attention from 'Attention Is All You Need' paper. Here, we add position embeddings to the queries and keys (as explained in the Deformable DETR paper). """ def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {num_heads})." ) self.scaling = self.head_dim**-0.5 self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): return tensor if position_embeddings is None else tensor + position_embeddings def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_embeddings: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" batch_size, target_len, embed_dim = hidden_states.size() # add position embeddings to the hidden states before projecting to queries and keys if position_embeddings is not None: hidden_states_original = hidden_states hidden_states = self.with_pos_embed(hidden_states, position_embeddings) # get queries, keys and values query_states = self.q_proj(hidden_states) * self.scaling key_states = self._shape(self.k_proj(hidden_states), -1, batch_size) value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size) proj_shape = (batch_size * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) source_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (batch_size * self.num_heads, target_len, 
source_len): raise ValueError( f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is" f" {attn_weights.size()}" ) # expand attention_mask if attention_mask is not None: # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] attention_mask = _expand_mask(attention_mask, hidden_states.dtype) if attention_mask is not None: if attention_mask.size() != (batch_size, 1, target_len, source_len): raise ValueError( f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is" f" {attention_mask.size()}" ) attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(batch_size, target_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrEncoderLayer with DeformableDetr->Deta class DetaEncoderLayer(nn.Module): def __init__(self, config: DetaConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = DetaMultiscaleDeformableAttention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, n_levels=config.num_feature_levels, n_points=config.encoder_n_points, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: torch.Tensor = None, reference_points=None, spatial_shapes=None, level_start_index=None, output_attentions: bool = False, ): """ Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Input to the layer. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Attention mask. position_embeddings (`torch.FloatTensor`, *optional*): Position embeddings, to be added to `hidden_states`. reference_points (`torch.FloatTensor`, *optional*): Reference points. spatial_shapes (`torch.LongTensor`, *optional*): Spatial shapes of the backbone feature maps. level_start_index (`torch.LongTensor`, *optional*): Level start index. 
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states # Apply Multi-scale Deformable Attention Module on the multi-scale feature maps. hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if self.training: if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrDecoderLayer with DeformableDetr->Deta class DetaDecoderLayer(nn.Module): def __init__(self, config: DetaConfig): super().__init__() self.embed_dim = config.d_model # self-attention self.self_attn = DetaMultiheadAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) # cross-attention self.encoder_attn = DetaMultiscaleDeformableAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, n_levels=config.num_feature_levels, n_points=config.decoder_n_points, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) # feedforward neural networks self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, position_embeddings: Optional[torch.Tensor] = None, reference_points=None, spatial_shapes=None, level_start_index=None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ): """ Args: hidden_states (`torch.FloatTensor`): Input to the layer of shape `(seq_len, batch, embed_dim)`. position_embeddings (`torch.FloatTensor`, *optional*): Position embeddings that are added to the queries and keys in the self-attention layer. reference_points (`torch.FloatTensor`, *optional*): Reference points. spatial_shapes (`torch.LongTensor`, *optional*): Spatial shapes. level_start_index (`torch.LongTensor`, *optional*): Level start index. 
encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, position_embeddings=position_embeddings, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) second_residual = hidden_states # Cross-Attention cross_attn_weights = None hidden_states, cross_attn_weights = self.encoder_attn( hidden_states=hidden_states, attention_mask=encoder_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = second_residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs # Copied from transformers.models.detr.modeling_detr.DetrClassificationHead class DetaClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, hidden_states: torch.Tensor): hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrPreTrainedModel with DeformableDetr->Deta class DetaPreTrainedModel(PreTrainedModel): config_class = DetaConfig base_model_prefix = "model" main_input_name = "pixel_values" def _init_weights(self, module): std = self.config.init_std if isinstance(module, DetaLearnedPositionEmbedding): nn.init.uniform_(module.row_embeddings.weight) nn.init.uniform_(module.column_embeddings.weight) elif isinstance(module, DetaMultiscaleDeformableAttention): module._reset_parameters() elif isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf 
https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() if hasattr(module, "reference_points") and not self.config.two_stage: nn.init.xavier_uniform_(module.reference_points.weight.data, gain=1.0) nn.init.constant_(module.reference_points.bias.data, 0.0) if hasattr(module, "level_embed"): nn.init.normal_(module.level_embed) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, DetaDecoder): module.gradient_checkpointing = value DETA_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`DetaConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ DETA_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`AutoImageProcessor.__call__`] for details. pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, num_queries)`, *optional*): Not used by default. Can be used to mask object queries. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you can choose to directly pass a flattened representation of an image. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an embedded representation. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. 
return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrEncoder with DeformableDetr->Deta class DetaEncoder(DetaPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* deformable attention layers. Each layer is a [`DetaEncoderLayer`]. The encoder updates the flattened multi-scale feature maps through multiple deformable attention layers. Args: config: DetaConfig """ def __init__(self, config: DetaConfig): super().__init__(config) self.dropout = config.dropout self.layers = nn.ModuleList([DetaEncoderLayer(config) for _ in range(config.encoder_layers)]) # Initialize weights and apply final processing self.post_init() @staticmethod def get_reference_points(spatial_shapes, valid_ratios, device): """ Get reference points for each feature map. Used in decoder. Args: spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`): Spatial shapes of each feature map. valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`): Valid ratios of each feature map. device (`torch.device`): Device on which to create the tensors. Returns: `torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)` """ reference_points_list = [] for level, (height, width) in enumerate(spatial_shapes): ref_y, ref_x = meshgrid( torch.linspace(0.5, height - 0.5, height, dtype=torch.float32, device=device), torch.linspace(0.5, width - 0.5, width, dtype=torch.float32, device=device), indexing="ij", ) # TODO: valid_ratios could be useless here. check https://github.com/fundamentalvision/Deformable-DETR/issues/36 ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, level, 1] * height) ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, level, 0] * width) ref = torch.stack((ref_x, ref_y), -1) reference_points_list.append(ref) reference_points = torch.cat(reference_points_list, 1) reference_points = reference_points[:, :, None] * valid_ratios[:, None] return reference_points def forward( self, inputs_embeds=None, attention_mask=None, position_embeddings=None, spatial_shapes=None, level_start_index=None, valid_ratios=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Flattened feature map (output of the backbone + projection layer) that is passed to the encoder. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`: - 1 for pixel features that are real (i.e. **not masked**), - 0 for pixel features that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Position embeddings that are added to the queries and keys in each self-attention layer. spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`): Spatial shapes of each feature map. level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`): Starting index of each feature map. valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`): Ratio of valid area in each feature level. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = inputs_embeds hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=inputs_embeds.device) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer( hidden_states, attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrDecoder with DeformableDetr->Deta,Deformable DETR->DETA class DetaDecoder(DetaPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DetaDecoderLayer`]. The decoder updates the query embeddings through multiple self-attention and cross-attention layers. Some tweaks for Deformable DETR: - `position_embeddings`, `reference_points`, `spatial_shapes` and `valid_ratios` are added to the forward pass. - it also returns a stack of intermediate outputs and reference points from all decoding layers. Args: config: DetaConfig """ def __init__(self, config: DetaConfig): super().__init__(config) self.dropout = config.dropout self.layers = nn.ModuleList([DetaDecoderLayer(config) for _ in range(config.decoder_layers)]) self.gradient_checkpointing = False # hack implementation for iterative bounding box refinement and two-stage Deformable DETR self.bbox_embed = None self.class_embed = None # Initialize weights and apply final processing self.post_init() def forward( self, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings=None, reference_points=None, spatial_shapes=None, level_start_index=None, valid_ratios=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`): The query embeddings that are passed into the decoder. encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. 
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Position embeddings that are added to the queries and keys in each self-attention layer. reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)` if `as_two_stage` else `(batch_size, num_queries, 2)`, *optional*): Reference point in range `[0, 1]`, top-left (0,0), bottom-right (1, 1), including padding area. spatial_shapes (`torch.FloatTensor` of shape `(num_feature_levels, 2)`): Spatial shapes of the feature maps. level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`, *optional*): Indices for the start of each feature level. In range `[0, sequence_length]`. valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`, *optional*): Ratio of valid area in each feature level. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is not None: hidden_states = inputs_embeds # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None intermediate = () intermediate_reference_points = () for idx, decoder_layer in enumerate(self.layers): if reference_points.shape[-1] == 4: reference_points_input = ( reference_points[:, :, None] * torch.cat([valid_ratios, valid_ratios], -1)[:, None] ) else: if reference_points.shape[-1] != 2: raise ValueError("Reference points' last dimension must be of size 2") reference_points_input = reference_points[:, :, None] * valid_ratios[:, None] if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(decoder_layer), hidden_states, encoder_hidden_states, encoder_attention_mask, None, ) else: layer_outputs = decoder_layer( hidden_states, position_embeddings=position_embeddings, encoder_hidden_states=encoder_hidden_states, reference_points=reference_points_input, spatial_shapes=spatial_shapes, level_start_index=level_start_index, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] # hack implementation for iterative bounding box refinement if self.bbox_embed is not None: tmp = self.bbox_embed[idx](hidden_states)
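                # Illustrative note (hypothetical numbers, not taken from the code): the bbox head
                # predicts a delta in logit space, which is added to the inverse-sigmoid of the
                # previous reference points and then squashed back to [0, 1]. For instance, a
                # reference coordinate of 0.5 has inverse_sigmoid(0.5) = 0.0, so a predicted delta
                # of 0.4 moves it to sigmoid(0.0 + 0.4) ~= 0.60 after this refinement step.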
if reference_points.shape[-1] == 4: new_reference_points = tmp + inverse_sigmoid(reference_points) new_reference_points = new_reference_points.sigmoid() else: if reference_points.shape[-1] != 2: raise ValueError( f"Reference points' last dimension must be of size 2, but is {reference_points.shape[-1]}" ) new_reference_points = tmp new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid(reference_points) new_reference_points = new_reference_points.sigmoid() reference_points = new_reference_points.detach() intermediate += (hidden_states,) intermediate_reference_points += (reference_points,) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # Keep batch_size as first dimension intermediate = torch.stack(intermediate, dim=1) intermediate_reference_points = torch.stack(intermediate_reference_points, dim=1) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, intermediate, intermediate_reference_points, all_hidden_states, all_self_attns, all_cross_attentions, ] if v is not None ) return DetaDecoderOutput( last_hidden_state=hidden_states, intermediate_hidden_states=intermediate, intermediate_reference_points=intermediate_reference_points, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @add_start_docstrings( """ The bare DETA Model (consisting of a backbone and encoder-decoder Transformer) outputting raw hidden-states without any specific head on top. """, DETA_START_DOCSTRING, ) class DetaModel(DetaPreTrainedModel): def __init__(self, config: DetaConfig): super().__init__(config) if config.two_stage: requires_backends(self, ["torchvision"]) # Create backbone with positional encoding self.backbone = DetaBackboneWithPositionalEncodings(config) intermediate_channel_sizes = self.backbone.intermediate_channel_sizes # Create input projection layers if config.num_feature_levels > 1: num_backbone_outs = len(intermediate_channel_sizes) input_proj_list = [] for _ in range(num_backbone_outs): in_channels = intermediate_channel_sizes[_] input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, config.d_model, kernel_size=1), nn.GroupNorm(32, config.d_model), ) ) for _ in range(config.num_feature_levels - num_backbone_outs): input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, config.d_model, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, config.d_model), ) ) in_channels = config.d_model self.input_proj = nn.ModuleList(input_proj_list) else: self.input_proj = nn.ModuleList( [ nn.Sequential( nn.Conv2d(intermediate_channel_sizes[-1], config.d_model, kernel_size=1), nn.GroupNorm(32, config.d_model), ) ] ) if not config.two_stage: self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model * 2) self.encoder = DetaEncoder(config) self.decoder = DetaDecoder(config) self.level_embed = nn.Parameter(torch.Tensor(config.num_feature_levels, config.d_model)) if config.two_stage: self.enc_output = nn.Linear(config.d_model, config.d_model) self.enc_output_norm = nn.LayerNorm(config.d_model) self.pos_trans = nn.Linear(config.d_model * 2, config.d_model * 2) self.pos_trans_norm = nn.LayerNorm(config.d_model * 2) self.pix_trans = nn.Linear(config.d_model, config.d_model) self.pix_trans_norm = nn.LayerNorm(config.d_model) else: self.reference_points = nn.Linear(config.d_model, 2) self.assign_first_stage = 
config.assign_first_stage self.two_stage_num_proposals = config.two_stage_num_proposals self.post_init() # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrModel.get_encoder def get_encoder(self): return self.encoder # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrModel.get_decoder def get_decoder(self): return self.decoder # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrModel.freeze_backbone def freeze_backbone(self): for name, param in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(False) # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrModel.unfreeze_backbone def unfreeze_backbone(self): for name, param in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(True) # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrModel.get_valid_ratio def get_valid_ratio(self, mask): """Get the valid ratio of all feature maps.""" _, height, width = mask.shape valid_height = torch.sum(mask[:, :, 0], 1) valid_width = torch.sum(mask[:, 0, :], 1) valid_ratio_height = valid_height.float() / height valid_ratio_width = valid_width.float() / width valid_ratio = torch.stack([valid_ratio_width, valid_ratio_height], -1) return valid_ratio # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrModel.get_proposal_pos_embed def get_proposal_pos_embed(self, proposals): """Get the position embedding of the proposals.""" num_pos_feats = 128 temperature = 10000 scale = 2 * math.pi dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=proposals.device) dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode="floor") / num_pos_feats) # batch_size, num_queries, 4 proposals = proposals.sigmoid() * scale # batch_size, num_queries, 4, 128 pos = proposals[:, :, :, None] / dim_t # batch_size, num_queries, 4, 64, 2 -> batch_size, num_queries, 512 pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), dim=4).flatten(2) return pos def gen_encoder_output_proposals(self, enc_output, padding_mask, spatial_shapes): """Generate the encoder output proposals from encoded enc_output. Args: enc_output (Tensor[batch_size, sequence_length, hidden_size]): Output of the encoder. padding_mask (Tensor[batch_size, sequence_length]): Padding mask for `enc_output`. spatial_shapes (Tensor[num_feature_levels, 2]): Spatial shapes of the feature maps. Returns: `tuple(torch.FloatTensor)`: A tuple of feature map and bbox prediction. - object_query (Tensor[batch_size, sequence_length, hidden_size]): Object query features. Later used to directly predict a bounding box. (without the need of a decoder) - output_proposals (Tensor[batch_size, sequence_length, 4]): Normalized proposals, after an inverse sigmoid.
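        For intuition (illustrative numbers): every feature-map pixel at level `l` becomes one proposal whose
        normalized width and height are `0.05 * 2**l`, so a level-0 proposal covers roughly 5% of the image
        side and a level-3 proposal roughly 40%, before the inverse sigmoid mentioned above is applied.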
""" batch_size = enc_output.shape[0] proposals = [] _cur = 0 level_ids = [] for level, (height, width) in enumerate(spatial_shapes): mask_flatten_ = padding_mask[:, _cur : (_cur + height * width)].view(batch_size, height, width, 1) valid_height = torch.sum(~mask_flatten_[:, :, 0, 0], 1) valid_width = torch.sum(~mask_flatten_[:, 0, :, 0], 1) grid_y, grid_x = meshgrid( torch.linspace(0, height - 1, height, dtype=torch.float32, device=enc_output.device), torch.linspace(0, width - 1, width, dtype=torch.float32, device=enc_output.device), indexing="ij", ) grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) scale = torch.cat([valid_width.unsqueeze(-1), valid_height.unsqueeze(-1)], 1).view(batch_size, 1, 1, 2) grid = (grid.unsqueeze(0).expand(batch_size, -1, -1, -1) + 0.5) / scale width_heigth = torch.ones_like(grid) * 0.05 * (2.0**level) proposal = torch.cat((grid, width_heigth), -1).view(batch_size, -1, 4) proposals.append(proposal) _cur += height * width level_ids.append(grid.new_ones(height * width, dtype=torch.long) * level) output_proposals = torch.cat(proposals, 1) output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True) output_proposals = torch.log(output_proposals / (1 - output_proposals)) # inverse sigmoid output_proposals = output_proposals.masked_fill(padding_mask.unsqueeze(-1), float("inf")) output_proposals = output_proposals.masked_fill(~output_proposals_valid, float("inf")) # assign each pixel as an object query object_query = enc_output object_query = object_query.masked_fill(padding_mask.unsqueeze(-1), float(0)) object_query = object_query.masked_fill(~output_proposals_valid, float(0)) object_query = self.enc_output_norm(self.enc_output(object_query)) level_ids = torch.cat(level_ids) return object_query, output_proposals, level_ids @add_start_docstrings_to_model_forward(DETA_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DetaModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values, pixel_mask=None, decoder_attention_mask=None, encoder_outputs=None, inputs_embeds=None, decoder_inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Returns: Examples: ```python >>> from transformers import AutoImageProcessor, DetaModel >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("jozhang97/deta-swin-large-o365") >>> model = DetaModel.from_pretrained("jozhang97/deta-swin-large-o365", two_stage=False) >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 900, 256] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, num_channels, height, width = pixel_values.shape device = pixel_values.device if pixel_mask is None: pixel_mask = torch.ones(((batch_size, height, width)), dtype=torch.long, device=device) # Extract multi-scale feature maps of same resolution `config.d_model` (cf Figure 4 in paper) # First, sent pixel_values + pixel_mask through Backbone to obtain the features # which is a 
list of tuples features, position_embeddings_list = self.backbone(pixel_values, pixel_mask) # Then, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default) sources = [] masks = [] for level, (source, mask) in enumerate(features): sources.append(self.input_proj[level](source)) masks.append(mask) if mask is None: raise ValueError("No attention mask was provided") # Lowest resolution feature maps are obtained via 3x3 stride 2 convolutions on the final stage if self.config.num_feature_levels > len(sources): _len_sources = len(sources) for level in range(_len_sources, self.config.num_feature_levels): if level == _len_sources: source = self.input_proj[level](features[-1][0]) else: source = self.input_proj[level](sources[-1]) mask = nn.functional.interpolate(pixel_mask[None].float(), size=source.shape[-2:]).to(torch.bool)[0] pos_l = self.backbone.position_embedding(source, mask).to(source.dtype) sources.append(source) masks.append(mask) position_embeddings_list.append(pos_l) # Create queries query_embeds = None if not self.config.two_stage: query_embeds = self.query_position_embeddings.weight # Prepare encoder inputs (by flattening) spatial_shapes = [(source.shape[2:]) for source in sources] source_flatten = [source.flatten(2).transpose(1, 2) for source in sources] mask_flatten = [mask.flatten(1) for mask in masks] lvl_pos_embed_flatten = [] for level, pos_embed in enumerate(position_embeddings_list): pos_embed = pos_embed.flatten(2).transpose(1, 2) lvl_pos_embed = pos_embed + self.level_embed[level].view(1, 1, -1) lvl_pos_embed_flatten.append(lvl_pos_embed) source_flatten = torch.cat(source_flatten, 1) mask_flatten = torch.cat(mask_flatten, 1) lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=source_flatten.device) level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1) valid_ratios = valid_ratios.float() # Fourth, sent source_flatten + mask_flatten + lvl_pos_embed_flatten (backbone + proj layer output) through encoder # Also provide spatial_shapes, level_start_index and valid_ratios if encoder_outputs is None: encoder_outputs = self.encoder( inputs_embeds=source_flatten, attention_mask=mask_flatten, position_embeddings=lvl_pos_embed_flatten, spatial_shapes=spatial_shapes, level_start_index=level_start_index, valid_ratios=valid_ratios, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # Fifth, prepare decoder inputs batch_size, _, num_channels = encoder_outputs[0].shape enc_outputs_class = None enc_outputs_coord_logits = None if self.config.two_stage: object_query_embedding, output_proposals, level_ids = self.gen_encoder_output_proposals( encoder_outputs[0], ~mask_flatten, spatial_shapes ) # hack implementation for two-stage DETA # apply a detection head to each pixel (A.4 in paper) # linear projection for bounding box binary classification (i.e. 
foreground and background) enc_outputs_class = self.decoder.class_embed[-1](object_query_embedding) # 3-layer FFN to predict bounding boxes coordinates (bbox regression branch) delta_bbox = self.decoder.bbox_embed[-1](object_query_embedding) enc_outputs_coord_logits = delta_bbox + output_proposals # only keep top scoring `config.two_stage_num_proposals` proposals topk = self.two_stage_num_proposals proposal_logit = enc_outputs_class[..., 0] if self.assign_first_stage: proposal_boxes = center_to_corners_format(enc_outputs_coord_logits.sigmoid().float()).clamp(0, 1) topk_proposals = [] for b in range(batch_size): prop_boxes_b = proposal_boxes[b] prop_logits_b = proposal_logit[b] # pre-nms per-level topk pre_nms_topk = 1000 pre_nms_inds = [] for lvl in range(len(spatial_shapes)): lvl_mask = level_ids == lvl pre_nms_inds.append(torch.topk(prop_logits_b.sigmoid() * lvl_mask, pre_nms_topk)[1]) pre_nms_inds = torch.cat(pre_nms_inds) # nms on topk indices post_nms_inds = batched_nms( prop_boxes_b[pre_nms_inds], prop_logits_b[pre_nms_inds], level_ids[pre_nms_inds], 0.9 ) keep_inds = pre_nms_inds[post_nms_inds] if len(keep_inds) < self.two_stage_num_proposals: print( f"[WARNING] nms proposals ({len(keep_inds)}) < {self.two_stage_num_proposals}, running" " naive topk" ) keep_inds = torch.topk(proposal_logit[b], topk)[1] # keep top Q/L indices for L levels q_per_l = topk // len(spatial_shapes) is_level_ordered = ( level_ids[keep_inds][None] == torch.arange(len(spatial_shapes), device=level_ids.device)[:, None] ) keep_inds_mask = is_level_ordered & (is_level_ordered.cumsum(1) <= q_per_l) # LS keep_inds_mask = keep_inds_mask.any(0) # S # pad to Q indices (might let ones filtered from pre-nms sneak by... unlikely because we pick high conf anyways) if keep_inds_mask.sum() < topk: num_to_add = topk - keep_inds_mask.sum() pad_inds = (~keep_inds_mask).nonzero()[:num_to_add] keep_inds_mask[pad_inds] = True keep_inds_topk = keep_inds[keep_inds_mask] topk_proposals.append(keep_inds_topk) topk_proposals = torch.stack(topk_proposals) else: topk_proposals = torch.topk(enc_outputs_class[..., 0], topk, dim=1)[1] topk_coords_logits = torch.gather( enc_outputs_coord_logits, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4) ) topk_coords_logits = topk_coords_logits.detach() reference_points = topk_coords_logits.sigmoid() init_reference_points = reference_points pos_trans_out = self.pos_trans_norm(self.pos_trans(self.get_proposal_pos_embed(topk_coords_logits))) query_embed, target = torch.split(pos_trans_out, num_channels, dim=2) else: query_embed, target = torch.split(query_embeds, num_channels, dim=1) query_embed = query_embed.unsqueeze(0).expand(batch_size, -1, -1) target = target.unsqueeze(0).expand(batch_size, -1, -1) reference_points = self.reference_points(query_embed).sigmoid() init_reference_points = reference_points decoder_outputs = self.decoder( inputs_embeds=target, position_embeddings=query_embed, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=mask_flatten, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, valid_ratios=valid_ratios, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: enc_outputs = tuple(value for value in [enc_outputs_class, enc_outputs_coord_logits] if value is not None) tuple_outputs = (init_reference_points,) + decoder_outputs + encoder_outputs + enc_outputs return tuple_outputs return DetaModelOutput( 
init_reference_points=init_reference_points, last_hidden_state=decoder_outputs.last_hidden_state, intermediate_hidden_states=decoder_outputs.intermediate_hidden_states, intermediate_reference_points=decoder_outputs.intermediate_reference_points, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, enc_outputs_class=enc_outputs_class, enc_outputs_coord_logits=enc_outputs_coord_logits, ) @add_start_docstrings( """ DETA Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on top, for tasks such as COCO detection. """, DETA_START_DOCSTRING, ) class DetaForObjectDetection(DetaPreTrainedModel): # When using clones, all layers > 0 will be clones, but layer 0 *is* required _keys_to_ignore_on_load_missing = [r"bbox_embed\.[1-9]\d*", r"class_embed\.[1-9]\d*"] # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrForObjectDetection.__init__ with DeformableDetr->Deta def __init__(self, config: DetaConfig): super().__init__(config) # Deformable DETR encoder-decoder model self.model = DetaModel(config) # Detection heads on top self.class_embed = nn.Linear(config.d_model, config.num_labels) self.bbox_embed = DetaMLPPredictionHead( input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3 ) prior_prob = 0.01 bias_value = -math.log((1 - prior_prob) / prior_prob) self.class_embed.bias.data = torch.ones(config.num_labels) * bias_value nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0) nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0) # if two-stage, the last class_embed and bbox_embed are for region proposal generation num_pred = (config.decoder_layers + 1) if config.two_stage else config.decoder_layers if config.with_box_refine: self.class_embed = _get_clones(self.class_embed, num_pred) self.bbox_embed = _get_clones(self.bbox_embed, num_pred) nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0) # hack implementation for iterative bounding box refinement self.model.decoder.bbox_embed = self.bbox_embed else: nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0) self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)]) self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)]) self.model.decoder.bbox_embed = None if config.two_stage: # hack implementation for two-stage self.model.decoder.class_embed = self.class_embed for box_embed in self.bbox_embed: nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0) # Initialize weights and apply final processing self.post_init() @torch.jit.unused # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrForObjectDetection._set_aux_loss def _set_aux_loss(self, outputs_class, outputs_coord): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list.
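        # Illustrative sketch (assumed layout: predictions stacked per decoder layer along dim 0, e.g.
        # outputs_class of shape [num_decoder_layers, batch_size, num_queries, num_labels]): the [:-1]
        # slice below keeps only the intermediate layers, returning something like
        # [{"logits": layer0_logits, "pred_boxes": layer0_boxes}, {"logits": layer1_logits, ...}, ...],
        # while the last layer's predictions are reported separately by the caller.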
return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] @add_start_docstrings_to_model_forward(DETA_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DetaObjectDetectionOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values, pixel_mask=None, decoder_attention_mask=None, encoder_outputs=None, inputs_embeds=None, decoder_inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (`List[Dict]` of len `(batch_size,)`, *optional*): Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`. Returns: Examples: ```python >>> from transformers import AutoImageProcessor, DetaForObjectDetection >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("jozhang97/deta-swin-large") >>> model = DetaForObjectDetection.from_pretrained("jozhang97/deta-swin-large") >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> # convert outputs (bounding boxes and class logits) to COCO API >>> target_sizes = torch.tensor([image.size[::-1]]) >>> results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[ ... 0 ... ] >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... box = [round(i, 2) for i in box.tolist()] ... print( ... f"Detected {model.config.id2label[label.item()]} with confidence " ... f"{round(score.item(), 3)} at location {box}" ... 
) Detected cat with confidence 0.683 at location [345.85, 23.68, 639.86, 372.83] Detected cat with confidence 0.683 at location [8.8, 52.49, 316.93, 473.45] Detected remote with confidence 0.568 at location [40.02, 73.75, 175.96, 117.33] Detected remote with confidence 0.546 at location [333.68, 77.13, 370.12, 187.51] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict # First, send images through DETR base model to obtain encoder + decoder outputs outputs = self.model( pixel_values, pixel_mask=pixel_mask, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs.intermediate_hidden_states if return_dict else outputs[2] init_reference = outputs.init_reference_points if return_dict else outputs[0] inter_references = outputs.intermediate_reference_points if return_dict else outputs[3] # class logits + predicted bounding boxes outputs_classes = [] outputs_coords = [] for level in range(hidden_states.shape[1]): if level == 0: reference = init_reference else: reference = inter_references[:, level - 1] reference = inverse_sigmoid(reference) outputs_class = self.class_embed[level](hidden_states[:, level]) delta_bbox = self.bbox_embed[level](hidden_states[:, level]) if reference.shape[-1] == 4: outputs_coord_logits = delta_bbox + reference elif reference.shape[-1] == 2: delta_bbox[..., :2] += reference outputs_coord_logits = delta_bbox else: raise ValueError(f"reference.shape[-1] should be 4 or 2, but got {reference.shape[-1]}") outputs_coord = outputs_coord_logits.sigmoid() outputs_classes.append(outputs_class) outputs_coords.append(outputs_coord) # Keep batch_size as first dimension outputs_class = torch.stack(outputs_classes, dim=1) outputs_coord = torch.stack(outputs_coords, dim=1) logits = outputs_class[:, -1] pred_boxes = outputs_coord[:, -1] loss, loss_dict, auxiliary_outputs = None, None, None if labels is not None: # First: create the matcher matcher = DetaHungarianMatcher( class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost ) # Second: create the criterion losses = ["labels", "boxes", "cardinality"] criterion = DetaLoss( matcher=matcher, num_classes=self.config.num_labels, focal_alpha=self.config.focal_alpha, losses=losses, num_queries=self.config.num_queries, ) criterion.to(logits.device) # Third: compute the losses, based on outputs and labels outputs_loss = {} outputs_loss["logits"] = logits outputs_loss["pred_boxes"] = pred_boxes if self.config.auxiliary_loss: intermediate = outputs.intermediate_hidden_states if return_dict else outputs[4] outputs_class = self.class_embed(intermediate) outputs_coord = self.bbox_embed(intermediate).sigmoid() auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord) outputs_loss["auxiliary_outputs"] = auxiliary_outputs if self.config.two_stage: enc_outputs_coord = outputs.enc_outputs_coord_logits.sigmoid() outputs_loss["enc_outputs"] = {"logits": outputs.enc_outputs_class, "pred_boxes": enc_outputs_coord} loss_dict = criterion(outputs_loss, labels) # Fourth: compute total loss, as a weighted sum of the various losses weight_dict = {"loss_ce": 1, "loss_bbox": self.config.bbox_loss_coefficient} weight_dict["loss_giou"] = self.config.giou_loss_coefficient if self.config.auxiliary_loss: aux_weight_dict = {} for i in
range(self.config.decoder_layers - 1): aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) weight_dict.update(aux_weight_dict) loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) if not return_dict: if auxiliary_outputs is not None: output = (logits, pred_boxes) + auxiliary_outputs + outputs else: output = (logits, pred_boxes) + outputs tuple_outputs = ((loss, loss_dict) + output) if loss is not None else output return tuple_outputs dict_outputs = DetaObjectDetectionOutput( loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, auxiliary_outputs=auxiliary_outputs, last_hidden_state=outputs.last_hidden_state, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, intermediate_hidden_states=outputs.intermediate_hidden_states, intermediate_reference_points=outputs.intermediate_reference_points, init_reference_points=outputs.init_reference_points, enc_outputs_class=outputs.enc_outputs_class, enc_outputs_coord_logits=outputs.enc_outputs_coord_logits, ) return dict_outputs # Copied from transformers.models.detr.modeling_detr.dice_loss def dice_loss(inputs, targets, num_boxes): """ Compute the DICE loss, similar to generalized IOU for masks Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). """ inputs = inputs.sigmoid() inputs = inputs.flatten(1) numerator = 2 * (inputs * targets).sum(1) denominator = inputs.sum(-1) + targets.sum(-1) loss = 1 - (numerator + 1) / (denominator + 1) return loss.sum() / num_boxes # Copied from transformers.models.detr.modeling_detr.sigmoid_focal_loss def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2): """ Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. Args: inputs (`torch.FloatTensor` of arbitrary shape): The predictions for each example. targets (`torch.FloatTensor` with the same shape as `inputs`) A tensor storing the binary classification label for each element in the `inputs` (0 for the negative class and 1 for the positive class). alpha (`float`, *optional*, defaults to `0.25`): Optional weighting factor in the range (0,1) to balance positive vs. negative examples. gamma (`int`, *optional*, defaults to `2`): Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. Returns: Loss tensor """ prob = inputs.sigmoid() ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction="none") # add modulating factor p_t = prob * targets + (1 - prob) * (1 - targets) loss = ce_loss * ((1 - p_t) ** gamma) if alpha >= 0: alpha_t = alpha * targets + (1 - alpha) * (1 - targets) loss = alpha_t * loss return loss.mean(1).sum() / num_boxes class DetaLoss(nn.Module): """ This class computes the losses for `DetaForObjectDetection`. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervised class and box). Args: matcher (`DetaHungarianMatcher`): Module able to compute a matching between targets and proposals. 
num_classes (`int`): Number of object categories, omitting the special no-object category. focal_alpha (`float`): Alpha parameter in focal loss. losses (`List[str]`): List of all the losses to be applied. See `get_loss` for a list of all available losses. """ def __init__( self, matcher, num_classes, focal_alpha, losses, num_queries, assign_first_stage=False, assign_second_stage=False, ): super().__init__() self.matcher = matcher self.num_classes = num_classes self.focal_alpha = focal_alpha self.losses = losses self.assign_first_stage = assign_first_stage self.assign_second_stage = assign_second_stage if self.assign_first_stage: self.stg1_assigner = DetaStage1Assigner() if self.assign_second_stage: self.stg2_assigner = DetaStage2Assigner(num_queries) # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.loss_labels def loss_labels(self, outputs, targets, indices, num_boxes): """ Classification loss (Binary focal loss) targets dicts must contain the key "class_labels" containing a tensor of dim [nb_target_boxes] """ if "logits" not in outputs: raise KeyError("No logits were found in the outputs") source_logits = outputs["logits"] idx = self._get_source_permutation_idx(indices) target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full( source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device ) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros( [source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1], dtype=source_logits.dtype, layout=source_logits.layout, device=source_logits.device, ) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:, :, :-1] loss_ce = ( sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * source_logits.shape[1] ) losses = {"loss_ce": loss_ce} return losses @torch.no_grad() # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.loss_cardinality def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes. This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients. """ logits = outputs["logits"] device = logits.device target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1) card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float()) losses = {"cardinality_error": card_err} return losses # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.loss_boxes def loss_boxes(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss. Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. 
""" if "pred_boxes" not in outputs: raise KeyError("No predicted boxes found in outputs") idx = self._get_source_permutation_idx(indices) source_boxes = outputs["pred_boxes"][idx] target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction="none") losses = {} losses["loss_bbox"] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag( generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes)) ) losses["loss_giou"] = loss_giou.sum() / num_boxes return losses # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss._get_source_permutation_idx def _get_source_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)]) source_idx = torch.cat([source for (source, _) in indices]) return batch_idx, source_idx # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss._get_target_permutation_idx def _get_target_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)]) target_idx = torch.cat([target for (_, target) in indices]) return batch_idx, target_idx # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.get_loss def get_loss(self, loss, outputs, targets, indices, num_boxes): loss_map = { "labels": self.loss_labels, "cardinality": self.loss_cardinality, "boxes": self.loss_boxes, } if loss not in loss_map: raise ValueError(f"Loss {loss} not supported") return loss_map[loss](outputs, targets, indices, num_boxes) def forward(self, outputs, targets): """ This performs the loss computation. Args: outputs (`dict`, *optional*): Dictionary of tensors, see the output specification of the model for the format. targets (`List[dict]`, *optional*): List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the losses applied, see each loss' doc. """ outputs_without_aux = {k: v for k, v in outputs.items() if k != "auxiliary_outputs"} # Retrieve the matching between the outputs of the last layer and the targets if self.assign_second_stage: indices = self.stg2_assigner(outputs_without_aux, targets) else: indices = self.matcher(outputs_without_aux, targets) # Compute the average number of target boxes accross all nodes, for normalization purposes num_boxes = sum(len(t["class_labels"]) for t in targets) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) # (Niels): comment out function below, distributed training to be added # if is_dist_avail_and_initialized(): # torch.distributed.all_reduce(num_boxes) # (Niels) in original implementation, num_boxes is divided by get_world_size() num_boxes = torch.clamp(num_boxes, min=1).item() # Compute all the requested losses losses = {} for loss in self.losses: losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. 
if "auxiliary_outputs" in outputs: for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]): if not self.assign_second_stage: indices = self.matcher(auxiliary_outputs, targets) for loss in self.losses: l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes) l_dict = {k + f"_{i}": v for k, v in l_dict.items()} losses.update(l_dict) if "enc_outputs" in outputs: enc_outputs = outputs["enc_outputs"] bin_targets = copy.deepcopy(targets) for bt in bin_targets: bt["labels"] = torch.zeros_like(bt["labels"]) if self.assign_first_stage: indices = self.stg1_assigner(enc_outputs, bin_targets) else: indices = self.matcher(enc_outputs, bin_targets) for loss in self.losses: kwargs = {} if loss == "labels": # Logging is enabled only for the last layer kwargs["log"] = False l_dict = self.get_loss(loss, enc_outputs, bin_targets, indices, num_boxes, **kwargs) l_dict = {k + "_enc": v for k, v in l_dict.items()} losses.update(l_dict) return losses # Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead class DetaMLPPredictionHead(nn.Module): """ Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates, height and width of a bounding box w.r.t. an image. Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py """ def __init__(self, input_dim, hidden_dim, output_dim, num_layers): super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) def forward(self, x): for i, layer in enumerate(self.layers): x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrHungarianMatcher with DeformableDetr->Deta class DetaHungarianMatcher(nn.Module): """ This class computes an assignment between the targets and the predictions of the network. For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are un-matched (and thus treated as non-objects). Args: class_cost: The relative weight of the classification error in the matching cost. bbox_cost: The relative weight of the L1 error of the bounding box coordinates in the matching cost. giou_cost: The relative weight of the giou loss of the bounding box in the matching cost. """ def __init__(self, class_cost: float = 1, bbox_cost: float = 1, giou_cost: float = 1): super().__init__() requires_backends(self, ["scipy"]) self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost if class_cost == 0 and bbox_cost == 0 and giou_cost == 0: raise ValueError("All costs of the Matcher can't be 0") @torch.no_grad() def forward(self, outputs, targets): """ Args: outputs (`dict`): A dictionary that contains at least these entries: * "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits * "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates. 
targets (`List[dict]`): A list of targets (len(targets) = batch_size), where each target is a dict containing: * "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels * "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates. Returns: `List[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where: - index_i is the indices of the selected predictions (in order) - index_j is the indices of the corresponding selected targets (in order) For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes) """ batch_size, num_queries = outputs["logits"].shape[:2] # We flatten to compute the cost matrices in a batch out_prob = outputs["logits"].flatten(0, 1).sigmoid() # [batch_size * num_queries, num_classes] out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] # Also concat the target labels and boxes target_ids = torch.cat([v["class_labels"] for v in targets]) target_bbox = torch.cat([v["boxes"] for v in targets]) # Compute the classification cost. alpha = 0.25 gamma = 2.0 neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log()) pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log()) class_cost = pos_cost_class[:, target_ids] - neg_cost_class[:, target_ids] # Compute the L1 cost between boxes bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) # Compute the giou cost between boxes giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) # Final cost matrix cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu() sizes = [len(v["boxes"]) for v in targets] indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))] return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] # Copied from transformers.models.detr.modeling_detr._upcast def _upcast(t: Tensor) -> Tensor: # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type if t.is_floating_point(): return t if t.dtype in (torch.float32, torch.float64) else t.float() else: return t if t.dtype in (torch.int32, torch.int64) else t.int() # Copied from transformers.models.detr.modeling_detr.box_area def box_area(boxes: Tensor) -> Tensor: """ Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates. Args: boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`): Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1 < x2` and `0 <= y1 < y2`. Returns: `torch.FloatTensor`: a tensor containing the area for each box. 
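
    Example (for illustration): a single box `[0.0, 0.0, 2.0, 3.0]` has width `2.0` and height `3.0`, so `box_area`
    returns `tensor([6.])` for it.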
""" boxes = _upcast(boxes) return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) # Copied from transformers.models.detr.modeling_detr.box_iou def box_iou(boxes1, boxes2): area1 = box_area(boxes1) area2 = box_area(boxes2) left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] width_height = (right_bottom - left_top).clamp(min=0) # [N,M,2] inter = width_height[:, :, 0] * width_height[:, :, 1] # [N,M] union = area1[:, None] + area2 - inter iou = inter / union return iou, union # Copied from transformers.models.detr.modeling_detr.generalized_box_iou def generalized_box_iou(boxes1, boxes2): """ Generalized IoU from https://giou.stanford.edu/. The boxes should be in [x0, y0, x1, y1] (corner) format. Returns: `torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2) """ # degenerate boxes gives inf / nan results # so do an early check if not (boxes1[:, 2:] >= boxes1[:, :2]).all(): raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}") if not (boxes2[:, 2:] >= boxes2[:, :2]).all(): raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}") iou, union = box_iou(boxes1, boxes2) top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2]) bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) width_height = (bottom_right - top_left).clamp(min=0) # [N,M,2] area = width_height[:, :, 0] * width_height[:, :, 1] return iou - (area - union) / area # from https://github.com/facebookresearch/detectron2/blob/cbbc1ce26473cb2a5cc8f58e8ada9ae14cb41052/detectron2/layers/wrappers.py#L100 def nonzero_tuple(x): """ A 'as_tuple=True' version of torch.nonzero to support torchscript. because of https://github.com/pytorch/pytorch/issues/38718 """ if torch.jit.is_scripting(): if x.dim() == 0: return x.unsqueeze(0).nonzero().unbind(1) return x.nonzero().unbind(1) else: return x.nonzero(as_tuple=True) # from https://github.com/facebookresearch/detectron2/blob/9921a2caa585d4fa66c4b534b6fab6e74d89b582/detectron2/modeling/matcher.py#L9 class DetaMatcher(object): """ This class assigns to each predicted "element" (e.g., a box) a ground-truth element. Each predicted element will have exactly zero or one matches; each ground-truth element may be matched to zero or more predicted elements. The matching is determined by the MxN match_quality_matrix, that characterizes how well each (ground-truth, prediction)-pair match each other. For example, if the elements are boxes, this matrix may contain box intersection-over-union overlap values. The matcher returns (a) a vector of length N containing the index of the ground-truth element m in [0, M) that matches to prediction n in [0, N). (b) a vector of length N containing the labels for each prediction. """ def __init__(self, thresholds: List[float], labels: List[int], allow_low_quality_matches: bool = False): """ Args: thresholds (`list[float]`): A list of thresholds used to stratify predictions into levels. labels (`list[int`): A list of values to label predictions belonging at each level. A label can be one of {-1, 0, 1} signifying {ignore, negative class, positive class}, respectively. allow_low_quality_matches (`bool`, *optional*, defaults to `False`): If `True`, produce additional matches for predictions with maximum match quality lower than high_threshold. See `set_low_quality_matches_` for more details. 
For example, thresholds = [0.3, 0.5] labels = [0, -1, 1] All predictions with iou < 0.3 will be marked with 0 and thus will be considered as false positives while training. All predictions with 0.3 <= iou < 0.5 will be marked with -1 and thus will be ignored. All predictions with 0.5 <= iou will be marked with 1 and thus will be considered as true positives. """ # Add -inf and +inf to first and last position in thresholds thresholds = thresholds[:] if thresholds[0] < 0: raise ValueError("Thresholds should be positive") thresholds.insert(0, -float("inf")) thresholds.append(float("inf")) # Currently torchscript does not support all + generator if not all([low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:])]): raise ValueError("Thresholds should be sorted.") if not all([l in [-1, 0, 1] for l in labels]): raise ValueError("All labels should be either -1, 0 or 1") if len(labels) != len(thresholds) - 1: raise ValueError("Number of labels should be equal to number of thresholds - 1") self.thresholds = thresholds self.labels = labels self.allow_low_quality_matches = allow_low_quality_matches def __call__(self, match_quality_matrix): """ Args: match_quality_matrix (Tensor[float]): an MxN tensor, containing the pairwise quality between M ground-truth elements and N predicted elements. All elements must be >= 0 (due to the us of `torch.nonzero` for selecting indices in `set_low_quality_matches_`). Returns: matches (Tensor[int64]): a vector of length N, where matches[i] is a matched ground-truth index in [0, M) match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates whether a prediction is a true or false positive or ignored """ assert match_quality_matrix.dim() == 2 if match_quality_matrix.numel() == 0: default_matches = match_quality_matrix.new_full((match_quality_matrix.size(1),), 0, dtype=torch.int64) # When no gt boxes exist, we define IOU = 0 and therefore set labels # to `self.labels[0]`, which usually defaults to background class 0 # To choose to ignore instead, can make labels=[-1,0,-1,1] + set appropriate thresholds default_match_labels = match_quality_matrix.new_full( (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8 ) return default_matches, default_match_labels assert torch.all(match_quality_matrix >= 0) # match_quality_matrix is M (gt) x N (predicted) # Max over gt elements (dim 0) to find best gt candidate for each prediction matched_vals, matches = match_quality_matrix.max(dim=0) match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8) for l, low, high in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]): low_high = (matched_vals >= low) & (matched_vals < high) match_labels[low_high] = l if self.allow_low_quality_matches: self.set_low_quality_matches_(match_labels, match_quality_matrix) return matches, match_labels def set_low_quality_matches_(self, match_labels, match_quality_matrix): """ Produce additional matches for predictions that have only low-quality matches. Specifically, for each ground-truth G find the set of predictions that have maximum overlap with it (including ties); for each prediction in that set, if it is unmatched, then match it to the ground-truth G. This function implements the RPN assignment case (i) in Sec. 3.1.2 of :paper:`Faster R-CNN`. """ # For each gt, find the prediction with which it has highest quality highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1) # Find the highest quality match available, even if it is low, including ties. 
        # Note that the match qualities must be positive due to the use of
        # `torch.nonzero`.
        _, pred_inds_with_highest_quality = nonzero_tuple(match_quality_matrix == highest_quality_foreach_gt[:, None])
        # If an anchor was labeled positive only due to a low-quality match
        # with gt_A, but it has larger overlap with gt_B, its matched index will still be gt_B.
        # This follows the implementation in Detectron, and is found to have no significant impact.
        match_labels[pred_inds_with_highest_quality] = 1


# from https://github.com/facebookresearch/detectron2/blob/cbbc1ce26473cb2a5cc8f58e8ada9ae14cb41052/detectron2/modeling/sampling.py#L9
def subsample_labels(labels: torch.Tensor, num_samples: int, positive_fraction: float, bg_label: int):
    """
    Return `num_samples` (or fewer, if not enough found) random samples from `labels` which is a mixture of
    positives & negatives. It will try to return as many positives as possible without exceeding
    `positive_fraction * num_samples`, and then try to fill the remaining slots with negatives.

    Args:
        labels (Tensor): (N, ) label vector with values:
            * -1: ignore
            * bg_label: background ("negative") class
            * otherwise: one or more foreground ("positive") classes
        num_samples (int): The total number of labels with value >= 0 to return. Values that are not sampled will be
            filled with -1 (ignore).
        positive_fraction (float): The number of subsampled labels with values > 0 is
            `min(num_positives, int(positive_fraction * num_samples))`. The number of negatives sampled is
            `min(num_negatives, num_samples - num_positives_sampled)`. In other words, if there are not enough
            positives, the sample is filled with negatives. If there are also not enough negatives, then as many
            elements are sampled as is possible.
        bg_label (int): label index of background ("negative") class.

    Returns:
        pos_idx, neg_idx (Tensor): 1D vector of indices. The total length of both is `num_samples` or fewer.
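
    Example (for illustration): with `num_samples=4` and `positive_fraction=0.25`, at most `int(4 * 0.25) = 1`
    positive index is returned, and the remaining slots are filled with negatives (fewer if not enough of either
    are available).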
""" positive = nonzero_tuple((labels != -1) & (labels != bg_label))[0] negative = nonzero_tuple(labels == bg_label)[0] num_pos = int(num_samples * positive_fraction) # protect against not enough positive examples num_pos = min(positive.numel(), num_pos) num_neg = num_samples - num_pos # protect against not enough negative examples num_neg = min(negative.numel(), num_neg) # randomly select positive and negative examples perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos] perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg] pos_idx = positive[perm1] neg_idx = negative[perm2] return pos_idx, neg_idx def sample_topk_per_gt(pr_inds, gt_inds, iou, k): if len(gt_inds) == 0: return pr_inds, gt_inds # find topk matches for each gt gt_inds2, counts = gt_inds.unique(return_counts=True) scores, pr_inds2 = iou[gt_inds2].topk(k, dim=1) gt_inds2 = gt_inds2[:, None].repeat(1, k) # filter to as many matches that gt has pr_inds3 = torch.cat([pr[:c] for c, pr in zip(counts, pr_inds2)]) gt_inds3 = torch.cat([gt[:c] for c, gt in zip(counts, gt_inds2)]) return pr_inds3, gt_inds3 # modified from https://github.com/facebookresearch/detectron2/blob/cbbc1ce26473cb2a5cc8f58e8ada9ae14cb41052/detectron2/modeling/roi_heads/roi_heads.py#L123 class DetaStage2Assigner(nn.Module): def __init__(self, num_queries, max_k=4): super().__init__() self.positive_fraction = 0.25 self.bg_label = 400 # number > 91 to filter out later self.batch_size_per_image = num_queries self.proposal_matcher = DetaMatcher(thresholds=[0.6], labels=[0, 1], allow_low_quality_matches=True) self.k = max_k def _sample_proposals(self, matched_idxs: torch.Tensor, matched_labels: torch.Tensor, gt_classes: torch.Tensor): """ Based on the matching between N proposals and M groundtruth, sample the proposals and set their classification labels. Args: matched_idxs (Tensor): a vector of length N, each is the best-matched gt index in [0, M) for each proposal. matched_labels (Tensor): a vector of length N, the matcher's label (one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal. gt_classes (Tensor): a vector of length M. Returns: Tensor: a vector of indices of sampled proposals. Each is in [0, N). Tensor: a vector of the same length, the classification label for each sampled proposal. Each sample is labeled as either a category in [0, num_classes) or the background (num_classes). """ has_gt = gt_classes.numel() > 0 # Get the corresponding GT for each proposal if has_gt: gt_classes = gt_classes[matched_idxs] # Label unmatched proposals (0 label from matcher) as background (label=num_classes) gt_classes[matched_labels == 0] = self.bg_label # Label ignore proposals (-1 label) gt_classes[matched_labels == -1] = -1 else: gt_classes = torch.zeros_like(matched_idxs) + self.bg_label sampled_fg_idxs, sampled_bg_idxs = subsample_labels( gt_classes, self.batch_size_per_image, self.positive_fraction, self.bg_label ) sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0) return sampled_idxs, gt_classes[sampled_idxs] def forward(self, outputs, targets, return_cost_matrix=False): # COCO categories are from 1 to 90. They set num_classes=91 and apply sigmoid. 
bs = len(targets) indices = [] ious = [] for b in range(bs): iou, _ = box_iou( center_to_corners_format(targets[b]["boxes"]), center_to_corners_format(outputs["init_reference"][b].detach()), ) matched_idxs, matched_labels = self.proposal_matcher( iou ) # proposal_id -> highest_iou_gt_id, proposal_id -> [1 if iou > 0.6, 0 ow] ( sampled_idxs, sampled_gt_classes, ) = self._sample_proposals( # list of sampled proposal_ids, sampled_id -> [0, num_classes)+[bg_label] matched_idxs, matched_labels, targets[b]["labels"] ) pos_pr_inds = sampled_idxs[sampled_gt_classes != self.bg_label] pos_gt_inds = matched_idxs[pos_pr_inds] pos_pr_inds, pos_gt_inds = self.postprocess_indices(pos_pr_inds, pos_gt_inds, iou) indices.append((pos_pr_inds, pos_gt_inds)) ious.append(iou) if return_cost_matrix: return indices, ious return indices def postprocess_indices(self, pr_inds, gt_inds, iou): return sample_topk_per_gt(pr_inds, gt_inds, iou, self.k) # modified from https://github.com/facebookresearch/detectron2/blob/cbbc1ce26473cb2a5cc8f58e8ada9ae14cb41052/detectron2/modeling/proposal_generator/rpn.py#L181 class DetaStage1Assigner(nn.Module): def __init__(self, t_low=0.3, t_high=0.7, max_k=4): super().__init__() self.positive_fraction = 0.5 self.batch_size_per_image = 256 self.k = max_k self.t_low = t_low self.t_high = t_high self.anchor_matcher = DetaMatcher( thresholds=[t_low, t_high], labels=[0, -1, 1], allow_low_quality_matches=True ) def _subsample_labels(self, label): """ Randomly sample a subset of positive and negative examples, and overwrite the label vector to the ignore value (-1) for all elements that are not included in the sample. Args: labels (Tensor): a vector of -1, 0, 1. Will be modified in-place and returned. """ pos_idx, neg_idx = subsample_labels(label, self.batch_size_per_image, self.positive_fraction, 0) # Fill with the ignore label (-1), then set positive and negative labels label.fill_(-1) label.scatter_(0, pos_idx, 1) label.scatter_(0, neg_idx, 0) return label def forward(self, outputs, targets): bs = len(targets) indices = [] for b in range(bs): anchors = outputs["anchors"][b] if len(targets[b]["boxes"]) == 0: indices.append( ( torch.tensor([], dtype=torch.long, device=anchors.device), torch.tensor([], dtype=torch.long, device=anchors.device), ) ) continue iou, _ = box_iou( center_to_corners_format(targets[b]["boxes"]), center_to_corners_format(anchors), ) matched_idxs, matched_labels = self.anchor_matcher( iou ) # proposal_id -> highest_iou_gt_id, proposal_id -> [1 if iou > 0.7, 0 if iou < 0.3, -1 ow] matched_labels = self._subsample_labels(matched_labels) all_pr_inds = torch.arange(len(anchors)) pos_pr_inds = all_pr_inds[matched_labels == 1] pos_gt_inds = matched_idxs[pos_pr_inds] pos_pr_inds, pos_gt_inds = self.postprocess_indices(pos_pr_inds, pos_gt_inds, iou) pos_pr_inds, pos_gt_inds = pos_pr_inds.to(anchors.device), pos_gt_inds.to(anchors.device) indices.append((pos_pr_inds, pos_gt_inds)) return indices def postprocess_indices(self, pr_inds, gt_inds, iou): return sample_topk_per_gt(pr_inds, gt_inds, iou, self.k)
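

# Note on DetaStage1Assigner above: with the default thresholds (t_low=0.3, t_high=0.7), anchors whose best IoU with
# any ground-truth box is below 0.3 are labeled negative, anchors with IoU of at least 0.7 are labeled positive, and
# anchors in between are ignored; `allow_low_quality_matches=True` additionally keeps, for every ground-truth box,
# its best-overlapping anchor. Labels are then subsampled to at most 256 anchors per image before the positive
# matches are capped at `max_k` anchors per ground-truth box.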
# coding=utf-8 # Copyright 2022 SenseTime and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch DETA model.""" import copy import math import warnings from dataclasses import dataclass from typing import Dict, List, Optional, Tuple import torch import torch.nn.functional as F from torch import Tensor, nn from ...activations import ACT2FN from ...file_utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, is_scipy_available, is_vision_available, replace_return_docstrings, ) from ...modeling_outputs import BaseModelOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import meshgrid, torch_int_div from ...utils import is_torchvision_available, logging, requires_backends from ..auto import AutoBackbone from .configuration_deta import DetaConfig logger = logging.get_logger(__name__) if is_vision_available(): from transformers.image_transforms import center_to_corners_format if is_torchvision_available(): from torchvision.ops.boxes import batched_nms if is_scipy_available(): from scipy.optimize import linear_sum_assignment logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "DetaConfig" _CHECKPOINT_FOR_DOC = "jozhang97/deta-swin-large-o365" DETA_PRETRAINED_MODEL_ARCHIVE_LIST = [ "jozhang97/deta-swin-large-o365", # See all DETA models at https://huggingface.co/models?filter=deta ] @dataclass # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrDecoderOutput with DeformableDetr->Deta class DetaDecoderOutput(ModelOutput): """ Base class for outputs of the DetaDecoder. This class adds two attributes to BaseModelOutputWithCrossAttentions, namely: - a stacked tensor of intermediate decoder hidden states (i.e. the output of each decoder layer) - a stacked tensor of intermediate reference points. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`): Stacked intermediate hidden states (output of each layer of the decoder). intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, sequence_length, hidden_size)`): Stacked intermediate reference points (reference points of each layer of the decoder). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` and `config.add_cross_attention=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. """ last_hidden_state: torch.FloatTensor = None intermediate_hidden_states: torch.FloatTensor = None intermediate_reference_points: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrModelOutput with DeformableDetr->Deta,Deformable DETR->DETA class DetaModelOutput(ModelOutput): """ Base class for outputs of the Deformable DETR encoder-decoder model. Args: init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Initial reference points sent through the Transformer decoder. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`): Sequence of hidden-states at the output of the last layer of the decoder of the model. intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`): Stacked intermediate hidden states (output of each layer of the decoder). intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`): Stacked intermediate reference points (reference points of each layer of the decoder). decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, num_queries, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, num_queries, num_queries)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. 
encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are picked as region proposals in the first stage. Output of bounding box binary classification (i.e. foreground and background). enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`): Logits of predicted bounding boxes coordinates in the first stage. """ init_reference_points: torch.FloatTensor = None last_hidden_state: torch.FloatTensor = None intermediate_hidden_states: torch.FloatTensor = None intermediate_reference_points: torch.FloatTensor = None decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None cross_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_last_hidden_state: Optional[torch.FloatTensor] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None enc_outputs_class: Optional[torch.FloatTensor] = None enc_outputs_coord_logits: Optional[torch.FloatTensor] = None @dataclass # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrObjectDetectionOutput with DeformableDetr->Deta class DetaObjectDetectionOutput(ModelOutput): """ Output type of [`DetaForObjectDetection`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)): Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized scale-invariant IoU loss. loss_dict (`Dict`, *optional*): A dictionary containing the individual losses. Useful for logging. logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`): Classification logits (including no-object) for all queries. pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding possible padding). 
You can use [`~DetaProcessor.post_process_object_detection`] to retrieve the unnormalized bounding boxes. auxiliary_outputs (`list[Dict]`, *optional*): Optional, only returned when auxilary losses are activated (i.e. `config.auxiliary_loss` is set to `True`) and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and `pred_boxes`) for each decoder layer. last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the decoder of the model. decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, num_queries, hidden_size)`. Hidden-states of the decoder at the output of each layer plus the initial embedding outputs. decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, num_queries, num_queries)`. Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the self-attention heads. cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_queries, num_heads, 4, 4)`. Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the weighted average in the cross-attention heads. encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder of the model. encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the encoder at the output of each layer plus the initial embedding outputs. encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, sequence_length, num_heads, 4, 4)`. Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the self-attention heads. intermediate_hidden_states (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, hidden_size)`): Stacked intermediate hidden states (output of each layer of the decoder). intermediate_reference_points (`torch.FloatTensor` of shape `(batch_size, config.decoder_layers, num_queries, 4)`): Stacked intermediate reference points (reference points of each layer of the decoder). init_reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`): Initial reference points sent through the Transformer decoder. 
        enc_outputs_class (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.num_labels)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
            Predicted bounding boxes scores where the top `config.two_stage_num_proposals` scoring bounding boxes are
            picked as region proposals in the first stage. Output of bounding box binary classification (i.e.
            foreground and background).
        enc_outputs_coord_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, 4)`, *optional*, returned when `config.with_box_refine=True` and `config.two_stage=True`):
            Logits of predicted bounding boxes coordinates in the first stage.
    """

    loss: Optional[torch.FloatTensor] = None
    loss_dict: Optional[Dict] = None
    logits: torch.FloatTensor = None
    pred_boxes: torch.FloatTensor = None
    auxiliary_outputs: Optional[List[Dict]] = None
    init_reference_points: Optional[torch.FloatTensor] = None
    last_hidden_state: Optional[torch.FloatTensor] = None
    intermediate_hidden_states: Optional[torch.FloatTensor] = None
    intermediate_reference_points: Optional[torch.FloatTensor] = None
    decoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    decoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    cross_attentions: Optional[Tuple[torch.FloatTensor]] = None
    encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None
    enc_outputs_class: Optional = None
    enc_outputs_coord_logits: Optional = None


def _get_clones(module, N):
    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])


def inverse_sigmoid(x, eps=1e-5):
    x = x.clamp(min=0, max=1)
    x1 = x.clamp(min=eps)
    x2 = (1 - x).clamp(min=eps)
    return torch.log(x1 / x2)


# Copied from transformers.models.detr.modeling_detr.DetrFrozenBatchNorm2d with Detr->Deta
class DetaFrozenBatchNorm2d(nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed.

    Copy-paste from torchvision.misc.ops with added eps before rsqrt, without which any other models than
    torchvision.models.resnet[18,34,50,101] produce nans.
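
    At inference time the module computes `x * scale + shift`, where `scale = weight / sqrt(running_var + eps)` and
    `shift = bias - running_mean * scale`, i.e. the usual BatchNorm normalization with frozen statistics.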
""" def __init__(self, n): super().__init__() self.register_buffer("weight", torch.ones(n)) self.register_buffer("bias", torch.zeros(n)) self.register_buffer("running_mean", torch.zeros(n)) self.register_buffer("running_var", torch.ones(n)) def _load_from_state_dict( self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ): num_batches_tracked_key = prefix + "num_batches_tracked" if num_batches_tracked_key in state_dict: del state_dict[num_batches_tracked_key] super()._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ) def forward(self, x): # move reshapes to the beginning # to make it user-friendly weight = self.weight.reshape(1, -1, 1, 1) bias = self.bias.reshape(1, -1, 1, 1) running_var = self.running_var.reshape(1, -1, 1, 1) running_mean = self.running_mean.reshape(1, -1, 1, 1) epsilon = 1e-5 scale = weight * (running_var + epsilon).rsqrt() bias = bias - running_mean * scale return x * scale + bias # Copied from transformers.models.detr.modeling_detr.replace_batch_norm with Detr->Deta def replace_batch_norm(m, name=""): for attr_str in dir(m): target_attr = getattr(m, attr_str) if isinstance(target_attr, nn.BatchNorm2d): frozen = DetaFrozenBatchNorm2d(target_attr.num_features) bn = getattr(m, attr_str) frozen.weight.data.copy_(bn.weight) frozen.bias.data.copy_(bn.bias) frozen.running_mean.data.copy_(bn.running_mean) frozen.running_var.data.copy_(bn.running_var) setattr(m, attr_str, frozen) for n, ch in m.named_children(): replace_batch_norm(ch, n) class DetaBackboneWithPositionalEncodings(nn.Module): """ Backbone model with positional embeddings. nn.BatchNorm2d layers are replaced by DetaFrozenBatchNorm2d as defined above. """ def __init__(self, config): super().__init__() backbone = AutoBackbone.from_config(config.backbone_config) with torch.no_grad(): replace_batch_norm(backbone) self.model = backbone self.intermediate_channel_sizes = self.model.channels # TODO fix this if config.backbone_config.model_type == "resnet": for name, parameter in self.model.named_parameters(): if "stages.1" not in name and "stages.2" not in name and "stages.3" not in name: parameter.requires_grad_(False) self.position_embedding = build_position_encoding(config) def forward(self, pixel_values: torch.Tensor, pixel_mask: torch.Tensor): """ Outputs feature maps of latter stages C_3 through C_5 in ResNet if `config.num_feature_levels > 1`, otherwise outputs feature maps of C_5. """ # first, send pixel_values through the backbone to get list of feature maps features = self.model(pixel_values).feature_maps # next, create position embeddings out = [] pos = [] for feature_map in features: # downsample pixel_mask to match shape of corresponding feature_map mask = nn.functional.interpolate(pixel_mask[None].float(), size=feature_map.shape[-2:]).to(torch.bool)[0] position_embeddings = self.position_embedding(feature_map, mask).to(feature_map.dtype) out.append((feature_map, mask)) pos.append(position_embeddings) return out, pos # Copied from transformers.models.detr.modeling_detr._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, target_len: Optional[int] = None): """ Expands attention_mask from `[batch_size, seq_len]` to `[batch_size, 1, target_seq_len, source_seq_len]`. 
""" batch_size, source_len = mask.size() target_len = target_len if target_len is not None else source_len expanded_mask = mask[:, None, None, :].expand(batch_size, 1, target_len, source_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min) # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrSinePositionEmbedding with DeformableDetr->Deta class DetaSinePositionEmbedding(nn.Module): """ This is a more standard version of the position embedding, very similar to the one used by the Attention is all you need paper, generalized to work on images. """ def __init__(self, embedding_dim=64, temperature=10000, normalize=False, scale=None): super().__init__() self.embedding_dim = embedding_dim self.temperature = temperature self.normalize = normalize if scale is not None and normalize is False: raise ValueError("normalize should be True if scale is passed") if scale is None: scale = 2 * math.pi self.scale = scale def forward(self, pixel_values, pixel_mask): if pixel_mask is None: raise ValueError("No pixel mask provided") y_embed = pixel_mask.cumsum(1, dtype=torch.float32) x_embed = pixel_mask.cumsum(2, dtype=torch.float32) if self.normalize: eps = 1e-6 y_embed = (y_embed - 0.5) / (y_embed[:, -1:, :] + eps) * self.scale x_embed = (x_embed - 0.5) / (x_embed[:, :, -1:] + eps) * self.scale dim_t = torch.arange(self.embedding_dim, dtype=torch.float32, device=pixel_values.device) dim_t = self.temperature ** (2 * torch_int_div(dim_t, 2 / self.embedding_dim)) pos_x = x_embed[:, :, :, None] / dim_t pos_y = y_embed[:, :, :, None] / dim_t pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3) pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3) pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2) return pos # Copied from transformers.models.detr.modeling_detr.DetrLearnedPositionEmbedding class DetaLearnedPositionEmbedding(nn.Module): """ This module learns positional embeddings up to a fixed maximum size. 
""" def __init__(self, embedding_dim=256): super().__init__() self.row_embeddings = nn.Embedding(50, embedding_dim) self.column_embeddings = nn.Embedding(50, embedding_dim) def forward(self, pixel_values, pixel_mask=None): height, width = pixel_values.shape[-2:] width_values = torch.arange(width, device=pixel_values.device) height_values = torch.arange(height, device=pixel_values.device) x_emb = self.column_embeddings(width_values) y_emb = self.row_embeddings(height_values) pos = torch.cat([x_emb.unsqueeze(0).repeat(height, 1, 1), y_emb.unsqueeze(1).repeat(1, width, 1)], dim=-1) pos = pos.permute(2, 0, 1) pos = pos.unsqueeze(0) pos = pos.repeat(pixel_values.shape[0], 1, 1, 1) return pos # Copied from transformers.models.detr.modeling_detr.build_position_encoding with Detr->Deta def build_position_encoding(config): n_steps = config.d_model // 2 if config.position_embedding_type == "sine": # TODO find a better way of exposing other arguments position_embedding = DetaSinePositionEmbedding(n_steps, normalize=True) elif config.position_embedding_type == "learned": position_embedding = DetaLearnedPositionEmbedding(n_steps) else: raise ValueError(f"Not supported {config.position_embedding_type}") return position_embedding # Copied from transformers.models.deformable_detr.modeling_deformable_detr.multi_scale_deformable_attention def multi_scale_deformable_attention( value: Tensor, value_spatial_shapes: Tensor, sampling_locations: Tensor, attention_weights: Tensor ) -> Tensor: batch_size, _, num_heads, hidden_dim = value.shape _, num_queries, num_heads, num_levels, num_points, _ = sampling_locations.shape value_list = value.split([height * width for height, width in value_spatial_shapes], dim=1) sampling_grids = 2 * sampling_locations - 1 sampling_value_list = [] for level_id, (height, width) in enumerate(value_spatial_shapes): # batch_size, height*width, num_heads, hidden_dim # -> batch_size, height*width, num_heads*hidden_dim # -> batch_size, num_heads*hidden_dim, height*width # -> batch_size*num_heads, hidden_dim, height, width value_l_ = ( value_list[level_id].flatten(2).transpose(1, 2).reshape(batch_size * num_heads, hidden_dim, height, width) ) # batch_size, num_queries, num_heads, num_points, 2 # -> batch_size, num_heads, num_queries, num_points, 2 # -> batch_size*num_heads, num_queries, num_points, 2 sampling_grid_l_ = sampling_grids[:, :, :, level_id].transpose(1, 2).flatten(0, 1) # batch_size*num_heads, hidden_dim, num_queries, num_points sampling_value_l_ = nn.functional.grid_sample( value_l_, sampling_grid_l_, mode="bilinear", padding_mode="zeros", align_corners=False ) sampling_value_list.append(sampling_value_l_) # (batch_size, num_queries, num_heads, num_levels, num_points) # -> (batch_size, num_heads, num_queries, num_levels, num_points) # -> (batch_size, num_heads, 1, num_queries, num_levels*num_points) attention_weights = attention_weights.transpose(1, 2).reshape( batch_size * num_heads, 1, num_queries, num_levels * num_points ) output = ( (torch.stack(sampling_value_list, dim=-2).flatten(-2) * attention_weights) .sum(-1) .view(batch_size, num_heads * hidden_dim, num_queries) ) return output.transpose(1, 2).contiguous() class DetaMultiscaleDeformableAttention(nn.Module): """ Multiscale deformable attention as proposed in Deformable DETR. 
""" # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrMultiscaleDeformableAttention.__init__ with DeformableDetr->Deta def __init__(self, embed_dim: int, num_heads: int, n_levels: int, n_points: int): super().__init__() if embed_dim % num_heads != 0: raise ValueError( f"embed_dim (d_model) must be divisible by num_heads, but got {embed_dim} and {num_heads}" ) dim_per_head = embed_dim // num_heads # check if dim_per_head is power of 2 if not ((dim_per_head & (dim_per_head - 1) == 0) and dim_per_head != 0): warnings.warn( "You'd better set embed_dim (d_model) in DetaMultiscaleDeformableAttention to make the" " dimension of each attention head a power of 2 which is more efficient in the authors' CUDA" " implementation." ) self.im2col_step = 64 self.d_model = embed_dim self.n_levels = n_levels self.n_heads = num_heads self.n_points = n_points self.sampling_offsets = nn.Linear(embed_dim, num_heads * n_levels * n_points * 2) self.attention_weights = nn.Linear(embed_dim, num_heads * n_levels * n_points) self.value_proj = nn.Linear(embed_dim, embed_dim) self.output_proj = nn.Linear(embed_dim, embed_dim) self._reset_parameters() def _reset_parameters(self): nn.init.constant_(self.sampling_offsets.weight.data, 0.0) thetas = torch.arange(self.n_heads, dtype=torch.float32) * (2.0 * math.pi / self.n_heads) grid_init = torch.stack([thetas.cos(), thetas.sin()], -1) grid_init = ( (grid_init / grid_init.abs().max(-1, keepdim=True)[0]) .view(self.n_heads, 1, 1, 2) .repeat(1, self.n_levels, self.n_points, 1) ) for i in range(self.n_points): grid_init[:, :, i, :] *= i + 1 with torch.no_grad(): self.sampling_offsets.bias = nn.Parameter(grid_init.view(-1)) nn.init.constant_(self.attention_weights.weight.data, 0.0) nn.init.constant_(self.attention_weights.bias.data, 0.0) nn.init.xavier_uniform_(self.value_proj.weight.data) nn.init.constant_(self.value_proj.bias.data, 0.0) nn.init.xavier_uniform_(self.output_proj.weight.data) nn.init.constant_(self.output_proj.bias.data, 0.0) def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): return tensor if position_embeddings is None else tensor + position_embeddings def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings: Optional[torch.Tensor] = None, reference_points=None, spatial_shapes=None, level_start_index=None, output_attentions: bool = False, ): # add position embeddings to the hidden states before projecting to queries and keys if position_embeddings is not None: hidden_states = self.with_pos_embed(hidden_states, position_embeddings) batch_size, num_queries, _ = hidden_states.shape batch_size, sequence_length, _ = encoder_hidden_states.shape if (spatial_shapes[:, 0] * spatial_shapes[:, 1]).sum() != sequence_length: raise ValueError( "Make sure to align the spatial shapes with the sequence length of the encoder hidden states" ) value = self.value_proj(encoder_hidden_states) if attention_mask is not None: # we invert the attention_mask value = value.masked_fill(~attention_mask[..., None], float(0)) value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads) sampling_offsets = self.sampling_offsets(hidden_states).view( batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2 ) attention_weights = self.attention_weights(hidden_states).view( batch_size, num_queries, self.n_heads, self.n_levels * self.n_points ) attention_weights = 
F.softmax(attention_weights, -1).view( batch_size, num_queries, self.n_heads, self.n_levels, self.n_points ) # batch_size, num_queries, n_heads, n_levels, n_points, 2 if reference_points.shape[-1] == 2: offset_normalizer = torch.stack([spatial_shapes[..., 1], spatial_shapes[..., 0]], -1) sampling_locations = ( reference_points[:, :, None, :, None, :] + sampling_offsets / offset_normalizer[None, None, None, :, None, :] ) elif reference_points.shape[-1] == 4: sampling_locations = ( reference_points[:, :, None, :, None, :2] + sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5 ) else: raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}") # PyTorch implementation (for now) output = multi_scale_deformable_attention(value, spatial_shapes, sampling_locations, attention_weights) output = self.output_proj(output) return output, attention_weights # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrMultiheadAttention with DeformableDetr->Deta,Deformable DETR->DETA class DetaMultiheadAttention(nn.Module): """ Multi-headed attention from 'Attention Is All You Need' paper. Here, we add position embeddings to the queries and keys (as explained in the Deformable DETR paper). """ def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if self.head_dim * num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {num_heads})." ) self.scaling = self.head_dim**-0.5 self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, batch_size: int): return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]): return tensor if position_embeddings is None else tensor + position_embeddings def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_embeddings: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" batch_size, target_len, embed_dim = hidden_states.size() # add position embeddings to the hidden states before projecting to queries and keys if position_embeddings is not None: hidden_states_original = hidden_states hidden_states = self.with_pos_embed(hidden_states, position_embeddings) # get queries, keys and values query_states = self.q_proj(hidden_states) * self.scaling key_states = self._shape(self.k_proj(hidden_states), -1, batch_size) value_states = self._shape(self.v_proj(hidden_states_original), -1, batch_size) proj_shape = (batch_size * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, target_len, batch_size).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) source_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (batch_size * self.num_heads, target_len, 
source_len): raise ValueError( f"Attention weights should be of size {(batch_size * self.num_heads, target_len, source_len)}, but is" f" {attn_weights.size()}" ) # expand attention_mask if attention_mask is not None: # [batch_size, seq_len] -> [batch_size, 1, target_seq_len, source_seq_len] attention_mask = _expand_mask(attention_mask, hidden_states.dtype) if attention_mask is not None: if attention_mask.size() != (batch_size, 1, target_len, source_len): raise ValueError( f"Attention mask should be of size {(batch_size, 1, target_len, source_len)}, but is" f" {attention_mask.size()}" ) attn_weights = attn_weights.view(batch_size, self.num_heads, target_len, source_len) + attention_mask attn_weights = attn_weights.view(batch_size * self.num_heads, target_len, source_len) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit awkward, but it's required to # make sure that attn_weights keeps its gradient. # In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(batch_size, self.num_heads, target_len, source_len) attn_weights = attn_weights_reshaped.view(batch_size * self.num_heads, target_len, source_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (batch_size * self.num_heads, target_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(batch_size, self.num_heads, target_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(batch_size, self.num_heads, target_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(batch_size, target_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrEncoderLayer with DeformableDetr->Deta class DetaEncoderLayer(nn.Module): def __init__(self, config: DetaConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = DetaMultiscaleDeformableAttention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, n_levels=config.num_feature_levels, n_points=config.encoder_n_points, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, position_embeddings: torch.Tensor = None, reference_points=None, spatial_shapes=None, level_start_index=None, output_attentions: bool = False, ): """ Args: hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Input to the layer. attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Attention mask. position_embeddings (`torch.FloatTensor`, *optional*): Position embeddings, to be added to `hidden_states`. reference_points (`torch.FloatTensor`, *optional*): Reference points. spatial_shapes (`torch.LongTensor`, *optional*): Spatial shapes of the backbone feature maps. level_start_index (`torch.LongTensor`, *optional*): Level start index. 
output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states # Apply Multi-scale Deformable Attention Module on the multi-scale feature maps. hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, encoder_hidden_states=hidden_states, encoder_attention_mask=attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) if self.training: if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any(): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrDecoderLayer with DeformableDetr->Deta class DetaDecoderLayer(nn.Module): def __init__(self, config: DetaConfig): super().__init__() self.embed_dim = config.d_model # self-attention self.self_attn = DetaMultiheadAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) # cross-attention self.encoder_attn = DetaMultiscaleDeformableAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, n_levels=config.num_feature_levels, n_points=config.decoder_n_points, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) # feedforward neural networks self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, position_embeddings: Optional[torch.Tensor] = None, reference_points=None, spatial_shapes=None, level_start_index=None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = False, ): """ Args: hidden_states (`torch.FloatTensor`): Input to the layer of shape `(seq_len, batch, embed_dim)`. position_embeddings (`torch.FloatTensor`, *optional*): Position embeddings that are added to the queries and keys in the self-attention layer. reference_points (`torch.FloatTensor`, *optional*): Reference points. spatial_shapes (`torch.LongTensor`, *optional*): Spatial shapes. level_start_index (`torch.LongTensor`, *optional*): Level start index. 
encoder_hidden_states (`torch.FloatTensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)` encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size `(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative values. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. """ residual = hidden_states # Self Attention hidden_states, self_attn_weights = self.self_attn( hidden_states=hidden_states, position_embeddings=position_embeddings, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) second_residual = hidden_states # Cross-Attention cross_attn_weights = None hidden_states, cross_attn_weights = self.encoder_attn( hidden_states=hidden_states, attention_mask=encoder_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions, ) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = second_residual + hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # Fully Connected residual = hidden_states hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states,) if output_attentions: outputs += (self_attn_weights, cross_attn_weights) return outputs # Copied from transformers.models.detr.modeling_detr.DetrClassificationHead class DetaClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float): super().__init__() self.dense = nn.Linear(input_dim, inner_dim) self.dropout = nn.Dropout(p=pooler_dropout) self.out_proj = nn.Linear(inner_dim, num_classes) def forward(self, hidden_states: torch.Tensor): hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrPreTrainedModel with DeformableDetr->Deta class DetaPreTrainedModel(PreTrainedModel): config_class = DetaConfig base_model_prefix = "model" main_input_name = "pixel_values" def _init_weights(self, module): std = self.config.init_std if isinstance(module, DetaLearnedPositionEmbedding): nn.init.uniform_(module.row_embeddings.weight) nn.init.uniform_(module.column_embeddings.weight) elif isinstance(module, DetaMultiscaleDeformableAttention): module._reset_parameters() elif isinstance(module, (nn.Linear, nn.Conv2d, nn.BatchNorm2d)): # Slightly different from the TF version which uses truncated_normal for initialization # cf 
https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=std) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=std) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() if hasattr(module, "reference_points") and not self.config.two_stage: nn.init.xavier_uniform_(module.reference_points.weight.data, gain=1.0) nn.init.constant_(module.reference_points.bias.data, 0.0) if hasattr(module, "level_embed"): nn.init.normal_(module.level_embed) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, DetaDecoder): module.gradient_checkpointing = value DETA_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`DetaConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ DETA_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`AutoImageProcessor.__call__`] for details. pixel_mask (`torch.LongTensor` of shape `(batch_size, height, width)`, *optional*): Mask to avoid performing attention on padding pixel values. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, num_queries)`, *optional*): Not used by default. Can be used to mask object queries. encoder_outputs (`tuple(tuple(torch.FloatTensor)`, *optional*): Tuple consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*: `attentions`) `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing the flattened feature map (output of the backbone + projection layer), you can choose to directly pass a flattened representation of an image. decoder_inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Optionally, instead of initializing the queries with a tensor of zeros, you can choose to directly pass an embedded representation. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. 
return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrEncoder with DeformableDetr->Deta class DetaEncoder(DetaPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* deformable attention layers. Each layer is a [`DetaEncoderLayer`]. The encoder updates the flattened multi-scale feature maps through multiple deformable attention layers. Args: config: DetaConfig """ def __init__(self, config: DetaConfig): super().__init__(config) self.dropout = config.dropout self.layers = nn.ModuleList([DetaEncoderLayer(config) for _ in range(config.encoder_layers)]) # Initialize weights and apply final processing self.post_init() @staticmethod def get_reference_points(spatial_shapes, valid_ratios, device): """ Get reference points for each feature map. Used in decoder. Args: spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`): Spatial shapes of each feature map. valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`): Valid ratios of each feature map. device (`torch.device`): Device on which to create the tensors. Returns: `torch.FloatTensor` of shape `(batch_size, num_queries, num_feature_levels, 2)` """ reference_points_list = [] for level, (height, width) in enumerate(spatial_shapes): ref_y, ref_x = meshgrid( torch.linspace(0.5, height - 0.5, height, dtype=torch.float32, device=device), torch.linspace(0.5, width - 0.5, width, dtype=torch.float32, device=device), indexing="ij", ) # TODO: valid_ratios could be useless here. check https://github.com/fundamentalvision/Deformable-DETR/issues/36 ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, level, 1] * height) ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, level, 0] * width) ref = torch.stack((ref_x, ref_y), -1) reference_points_list.append(ref) reference_points = torch.cat(reference_points_list, 1) reference_points = reference_points[:, :, None] * valid_ratios[:, None] return reference_points def forward( self, inputs_embeds=None, attention_mask=None, position_embeddings=None, spatial_shapes=None, level_start_index=None, valid_ratios=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Flattened feature map (output of the backbone + projection layer) that is passed to the encoder. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding pixel features. Mask values selected in `[0, 1]`: - 1 for pixel features that are real (i.e. **not masked**), - 0 for pixel features that are padding (i.e. **masked**). [What are attention masks?](../glossary#attention-mask) position_embeddings (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Position embeddings that are added to the queries and keys in each self-attention layer. spatial_shapes (`torch.LongTensor` of shape `(num_feature_levels, 2)`): Spatial shapes of each feature map. level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`): Starting index of each feature map. valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`): Ratio of valid area in each feature level. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict hidden_states = inputs_embeds hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) reference_points = self.get_reference_points(spatial_shapes, valid_ratios, device=inputs_embeds.device) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) layer_outputs = encoder_layer( hidden_states, attention_mask, position_embeddings=position_embeddings, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrDecoder with DeformableDetr->Deta,Deformable DETR->DETA class DetaDecoder(DetaPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a [`DetaDecoderLayer`]. The decoder updates the query embeddings through multiple self-attention and cross-attention layers. Some tweaks for Deformable DETR: - `position_embeddings`, `reference_points`, `spatial_shapes` and `valid_ratios` are added to the forward pass. - it also returns a stack of intermediate outputs and reference points from all decoding layers. Args: config: DetaConfig """ def __init__(self, config: DetaConfig): super().__init__(config) self.dropout = config.dropout self.layers = nn.ModuleList([DetaDecoderLayer(config) for _ in range(config.decoder_layers)]) self.gradient_checkpointing = False # hack implementation for iterative bounding box refinement and two-stage Deformable DETR self.bbox_embed = None self.class_embed = None # Initialize weights and apply final processing self.post_init() def forward( self, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, position_embeddings=None, reference_points=None, spatial_shapes=None, level_start_index=None, valid_ratios=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`): The query embeddings that are passed into the decoder. encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. 
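# Illustrative sketch (not part of the model): how `get_reference_points` above lays out
# normalized (x, y) centers for every position of every feature level, here for two toy
# unpadded levels. We call torch.meshgrid directly (the model wraps it); the names are ours.
def _demo_reference_points():
    import torch

    spatial_shapes = [(4, 4), (2, 2)]                     # two toy feature levels
    valid_ratios = torch.ones(1, len(spatial_shapes), 2)  # (batch, levels, 2): no padding
    points = []
    for level, (height, width) in enumerate(spatial_shapes):
        ref_y, ref_x = torch.meshgrid(
            torch.linspace(0.5, height - 0.5, height),
            torch.linspace(0.5, width - 0.5, width),
            indexing="ij",
        )
        ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, level, 1] * height)
        ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, level, 0] * width)
        points.append(torch.stack((ref_x, ref_y), -1))
    reference_points = torch.cat(points, 1)               # (1, 16 + 4, 2), values in (0, 1)
    return reference_points[:, :, None] * valid_ratios[:, None]  # (batch, queries, levels, 2)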
encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing cross-attention on padding pixel_values of the encoder. Mask values selected in `[0, 1]`: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). position_embeddings (`torch.FloatTensor` of shape `(batch_size, num_queries, hidden_size)`, *optional*): Position embeddings that are added to the queries and keys in each self-attention layer. reference_points (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)` is `as_two_stage` else `(batch_size, num_queries, 2)` or , *optional*): Reference point in range `[0, 1]`, top-left (0,0), bottom-right (1, 1), including padding area. spatial_shapes (`torch.FloatTensor` of shape `(num_feature_levels, 2)`): Spatial shapes of the feature maps. level_start_index (`torch.LongTensor` of shape `(num_feature_levels)`, *optional*): Indexes for the start of each feature level. In range `[0, sequence_length]`. valid_ratios (`torch.FloatTensor` of shape `(batch_size, num_feature_levels, 2)`, *optional*): Ratio of valid area in each feature level. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if inputs_embeds is not None: hidden_states = inputs_embeds # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None intermediate = () intermediate_reference_points = () for idx, decoder_layer in enumerate(self.layers): if reference_points.shape[-1] == 4: reference_points_input = ( reference_points[:, :, None] * torch.cat([valid_ratios, valid_ratios], -1)[:, None] ) else: if reference_points.shape[-1] != 2: raise ValueError("Reference points' last dimension must be of size 2") reference_points_input = reference_points[:, :, None] * valid_ratios[:, None] if output_hidden_states: all_hidden_states += (hidden_states,) if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(decoder_layer), hidden_states, encoder_hidden_states, encoder_attention_mask, None, ) else: layer_outputs = decoder_layer( hidden_states, position_embeddings=position_embeddings, encoder_hidden_states=encoder_hidden_states, reference_points=reference_points_input, spatial_shapes=spatial_shapes, level_start_index=level_start_index, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] # hack implementation for iterative bounding box refinement if self.bbox_embed is not None: tmp = self.bbox_embed[idx](hidden_states) 
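# Illustrative sketch (not part of the model): the gradient-checkpointing pattern used in the
# decoder loop above, applied to a generic layer. The wrapped forward is recomputed during the
# backward pass instead of storing its activations, trading compute for memory.
def _demo_gradient_checkpointing(layer, hidden_states, training=True):
    from torch.utils.checkpoint import checkpoint

    if training:

        def create_custom_forward(module):
            def custom_forward(*inputs):
                return module(*inputs)

            return custom_forward

        return checkpoint(create_custom_forward(layer), hidden_states)
    return layer(hidden_states)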
if reference_points.shape[-1] == 4: new_reference_points = tmp + inverse_sigmoid(reference_points) new_reference_points = new_reference_points.sigmoid() else: if reference_points.shape[-1] != 2: raise ValueError( f"Reference points' last dimension must be of size 2, but is {reference_points.shape[-1]}" ) new_reference_points = tmp new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid(reference_points) new_reference_points = new_reference_points.sigmoid() reference_points = new_reference_points.detach() intermediate += (hidden_states,) intermediate_reference_points += (reference_points,) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) # Keep batch_size as first dimension intermediate = torch.stack(intermediate, dim=1) intermediate_reference_points = torch.stack(intermediate_reference_points, dim=1) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, intermediate, intermediate_reference_points, all_hidden_states, all_self_attns, all_cross_attentions, ] if v is not None ) return DetaDecoderOutput( last_hidden_state=hidden_states, intermediate_hidden_states=intermediate, intermediate_reference_points=intermediate_reference_points, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @add_start_docstrings( """ The bare DETA Model (consisting of a backbone and encoder-decoder Transformer) outputting raw hidden-states without any specific head on top. """, DETA_START_DOCSTRING, ) class DetaModel(DetaPreTrainedModel): def __init__(self, config: DetaConfig): super().__init__(config) if config.two_stage: requires_backends(self, ["torchvision"]) # Create backbone with positional encoding self.backbone = DetaBackboneWithPositionalEncodings(config) intermediate_channel_sizes = self.backbone.intermediate_channel_sizes # Create input projection layers if config.num_feature_levels > 1: num_backbone_outs = len(intermediate_channel_sizes) input_proj_list = [] for _ in range(num_backbone_outs): in_channels = intermediate_channel_sizes[_] input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, config.d_model, kernel_size=1), nn.GroupNorm(32, config.d_model), ) ) for _ in range(config.num_feature_levels - num_backbone_outs): input_proj_list.append( nn.Sequential( nn.Conv2d(in_channels, config.d_model, kernel_size=3, stride=2, padding=1), nn.GroupNorm(32, config.d_model), ) ) in_channels = config.d_model self.input_proj = nn.ModuleList(input_proj_list) else: self.input_proj = nn.ModuleList( [ nn.Sequential( nn.Conv2d(intermediate_channel_sizes[-1], config.d_model, kernel_size=1), nn.GroupNorm(32, config.d_model), ) ] ) if not config.two_stage: self.query_position_embeddings = nn.Embedding(config.num_queries, config.d_model * 2) self.encoder = DetaEncoder(config) self.decoder = DetaDecoder(config) self.level_embed = nn.Parameter(torch.Tensor(config.num_feature_levels, config.d_model)) if config.two_stage: self.enc_output = nn.Linear(config.d_model, config.d_model) self.enc_output_norm = nn.LayerNorm(config.d_model) self.pos_trans = nn.Linear(config.d_model * 2, config.d_model * 2) self.pos_trans_norm = nn.LayerNorm(config.d_model * 2) self.pix_trans = nn.Linear(config.d_model, config.d_model) self.pix_trans_norm = nn.LayerNorm(config.d_model) else: self.reference_points = nn.Linear(config.d_model, 2) self.assign_first_stage = 
config.assign_first_stage self.two_stage_num_proposals = config.two_stage_num_proposals self.post_init() # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrModel.get_encoder def get_encoder(self): return self.encoder # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrModel.get_decoder def get_decoder(self): return self.decoder # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrModel.freeze_backbone def freeze_backbone(self): for name, param in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(False) # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrModel.unfreeze_backbone def unfreeze_backbone(self): for name, param in self.backbone.conv_encoder.model.named_parameters(): param.requires_grad_(True) # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrModel.get_valid_ratio def get_valid_ratio(self, mask): """Get the valid ratio of all feature maps.""" _, height, width = mask.shape valid_height = torch.sum(mask[:, :, 0], 1) valid_width = torch.sum(mask[:, 0, :], 1) valid_ratio_heigth = valid_height.float() / height valid_ratio_width = valid_width.float() / width valid_ratio = torch.stack([valid_ratio_width, valid_ratio_heigth], -1) return valid_ratio # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrModel.get_proposal_pos_embed def get_proposal_pos_embed(self, proposals): """Get the position embedding of the proposals.""" num_pos_feats = 128 temperature = 10000 scale = 2 * math.pi dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=proposals.device) dim_t = temperature ** (2 * torch.div(dim_t, 2) / num_pos_feats) # batch_size, num_queries, 4 proposals = proposals.sigmoid() * scale # batch_size, num_queries, 4, 128 pos = proposals[:, :, :, None] / dim_t # batch_size, num_queries, 4, 64, 2 -> batch_size, num_queries, 512 pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), dim=4).flatten(2) return pos def gen_encoder_output_proposals(self, enc_output, padding_mask, spatial_shapes): """Generate the encoder output proposals from encoded enc_output. Args: enc_output (Tensor[batch_size, sequence_length, hidden_size]): Output of the encoder. padding_mask (Tensor[batch_size, sequence_length]): Padding mask for `enc_output`. spatial_shapes (Tensor[num_feature_levels, 2]): Spatial shapes of the feature maps. Returns: `tuple(torch.FloatTensor)`: A tuple of feature map and bbox prediction. - object_query (Tensor[batch_size, sequence_length, hidden_size]): Object query features. Later used to directly predict a bounding box. (without the need of a decoder) - output_proposals (Tensor[batch_size, sequence_length, 4]): Normalized proposals, after an inverse sigmoid. 
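# Illustrative sketch (not part of the model): proposals and box refinements live in
# "inverse sigmoid" (logit) space, so predicted deltas can be added before squashing back to
# [0, 1]. `_demo_inverse_sigmoid` is our name; the model uses its own `inverse_sigmoid` helper.
def _demo_inverse_sigmoid():
    import torch

    boxes = torch.tensor([[0.25, 0.40, 0.10, 0.20]])  # normalized (center_x, center_y, w, h)
    eps = 1e-5
    clamped = boxes.clamp(min=eps, max=1 - eps)
    logits = torch.log(clamped / (1 - clamped))       # inverse sigmoid
    delta = torch.tensor([[0.10, -0.10, 0.0, 0.0]])   # a predicted refinement
    return (logits + delta).sigmoid()                 # refined boxes, back in [0, 1]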
""" batch_size = enc_output.shape[0] proposals = [] _cur = 0 level_ids = [] for level, (height, width) in enumerate(spatial_shapes): mask_flatten_ = padding_mask[:, _cur : (_cur + height * width)].view(batch_size, height, width, 1) valid_height = torch.sum(~mask_flatten_[:, :, 0, 0], 1) valid_width = torch.sum(~mask_flatten_[:, 0, :, 0], 1) grid_y, grid_x = meshgrid( torch.linspace(0, height - 1, height, dtype=torch.float32, device=enc_output.device), torch.linspace(0, width - 1, width, dtype=torch.float32, device=enc_output.device), indexing="ij", ) grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1) scale = torch.cat([valid_width.unsqueeze(-1), valid_height.unsqueeze(-1)], 1).view(batch_size, 1, 1, 2) grid = (grid.unsqueeze(0).expand(batch_size, -1, -1, -1) + 0.5) / scale width_heigth = torch.ones_like(grid) * 0.05 * (2.0**level) proposal = torch.cat((grid, width_heigth), -1).view(batch_size, -1, 4) proposals.append(proposal) _cur += height * width level_ids.append(grid.new_ones(height * width, dtype=torch.long) * level) output_proposals = torch.cat(proposals, 1) output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(-1, keepdim=True) output_proposals = torch.log(output_proposals / (1 - output_proposals)) # inverse sigmoid output_proposals = output_proposals.masked_fill(padding_mask.unsqueeze(-1), float("inf")) output_proposals = output_proposals.masked_fill(~output_proposals_valid, float("inf")) # assign each pixel as an object query object_query = enc_output object_query = object_query.masked_fill(padding_mask.unsqueeze(-1), float(0)) object_query = object_query.masked_fill(~output_proposals_valid, float(0)) object_query = self.enc_output_norm(self.enc_output(object_query)) level_ids = torch.cat(level_ids) return object_query, output_proposals, level_ids @add_start_docstrings_to_model_forward(DETA_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DetaModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values, pixel_mask=None, decoder_attention_mask=None, encoder_outputs=None, inputs_embeds=None, decoder_inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Returns: Examples: ```python >>> from transformers import AutoImageProcessor, DetaModel >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("jozhang97/deta-swin-large-o365") >>> model = DetaModel.from_pretrained("jozhang97/deta-swin-large-o365", two_stage=False) >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) [1, 900, 256] ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict batch_size, num_channels, height, width = pixel_values.shape device = pixel_values.device if pixel_mask is None: pixel_mask = torch.ones(((batch_size, height, width)), dtype=torch.long, device=device) # Extract multi-scale feature maps of same resolution `config.d_model` (cf Figure 4 in paper) # First, sent pixel_values + pixel_mask through Backbone to obtain the features # which is a 
list of tuples features, position_embeddings_list = self.backbone(pixel_values, pixel_mask) # Then, apply 1x1 convolution to reduce the channel dimension to d_model (256 by default) sources = [] masks = [] for level, (source, mask) in enumerate(features): sources.append(self.input_proj[level](source)) masks.append(mask) if mask is None: raise ValueError("No attention mask was provided") # Lowest resolution feature maps are obtained via 3x3 stride 2 convolutions on the final stage if self.config.num_feature_levels > len(sources): _len_sources = len(sources) for level in range(_len_sources, self.config.num_feature_levels): if level == _len_sources: source = self.input_proj[level](features[-1][0]) else: source = self.input_proj[level](sources[-1]) mask = nn.functional.interpolate(pixel_mask[None].float(), size=source.shape[-2:]).to(torch.bool)[0] pos_l = self.backbone.position_embedding(source, mask).to(source.dtype) sources.append(source) masks.append(mask) position_embeddings_list.append(pos_l) # Create queries query_embeds = None if not self.config.two_stage: query_embeds = self.query_position_embeddings.weight # Prepare encoder inputs (by flattening) spatial_shapes = [(source.shape[2:]) for source in sources] source_flatten = [source.flatten(2).transpose(1, 2) for source in sources] mask_flatten = [mask.flatten(1) for mask in masks] lvl_pos_embed_flatten = [] for level, pos_embed in enumerate(position_embeddings_list): pos_embed = pos_embed.flatten(2).transpose(1, 2) lvl_pos_embed = pos_embed + self.level_embed[level].view(1, 1, -1) lvl_pos_embed_flatten.append(lvl_pos_embed) source_flatten = torch.cat(source_flatten, 1) mask_flatten = torch.cat(mask_flatten, 1) lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1) spatial_shapes = torch.as_tensor(spatial_shapes, dtype=torch.long, device=source_flatten.device) level_start_index = torch.cat((spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])) valid_ratios = torch.stack([self.get_valid_ratio(m) for m in masks], 1) valid_ratios = valid_ratios.float() # Fourth, sent source_flatten + mask_flatten + lvl_pos_embed_flatten (backbone + proj layer output) through encoder # Also provide spatial_shapes, level_start_index and valid_ratios if encoder_outputs is None: encoder_outputs = self.encoder( inputs_embeds=source_flatten, attention_mask=mask_flatten, position_embeddings=lvl_pos_embed_flatten, spatial_shapes=spatial_shapes, level_start_index=level_start_index, valid_ratios=valid_ratios, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # Fifth, prepare decoder inputs batch_size, _, num_channels = encoder_outputs[0].shape enc_outputs_class = None enc_outputs_coord_logits = None if self.config.two_stage: object_query_embedding, output_proposals, level_ids = self.gen_encoder_output_proposals( encoder_outputs[0], ~mask_flatten, spatial_shapes ) # hack implementation for two-stage DETA # apply a detection head to each pixel (A.4 in paper) # linear projection for bounding box binary classification (i.e. 
foreground and background) enc_outputs_class = self.decoder.class_embed[-1](object_query_embedding) # 3-layer FFN to predict bounding boxes coordinates (bbox regression branch) delta_bbox = self.decoder.bbox_embed[-1](object_query_embedding) enc_outputs_coord_logits = delta_bbox + output_proposals # only keep top scoring `config.two_stage_num_proposals` proposals topk = self.two_stage_num_proposals proposal_logit = enc_outputs_class[..., 0] if self.assign_first_stage: proposal_boxes = center_to_corners_format(enc_outputs_coord_logits.sigmoid().float()).clamp(0, 1) topk_proposals = [] for b in range(batch_size): prop_boxes_b = proposal_boxes[b] prop_logits_b = proposal_logit[b] # pre-nms per-level topk pre_nms_topk = 1000 pre_nms_inds = [] for lvl in range(len(spatial_shapes)): lvl_mask = level_ids == lvl pre_nms_inds.append(torch.topk(prop_logits_b.sigmoid() * lvl_mask, pre_nms_topk)[1]) pre_nms_inds = torch.cat(pre_nms_inds) # nms on topk indices post_nms_inds = batched_nms( prop_boxes_b[pre_nms_inds], prop_logits_b[pre_nms_inds], level_ids[pre_nms_inds], 0.9 ) keep_inds = pre_nms_inds[post_nms_inds] if len(keep_inds) < self.two_stage_num_proposals: print( f"[WARNING] nms proposals ({len(keep_inds)}) < {self.two_stage_num_proposals}, running" " naive topk" ) keep_inds = torch.topk(proposal_logit[b], topk)[1] # keep top Q/L indices for L levels q_per_l = topk // len(spatial_shapes) is_level_ordered = ( level_ids[keep_inds][None] == torch.arange(len(spatial_shapes), device=level_ids.device)[:, None] ) keep_inds_mask = is_level_ordered & (is_level_ordered.cumsum(1) <= q_per_l) # LS keep_inds_mask = keep_inds_mask.any(0) # S # pad to Q indices (might let ones filtered from pre-nms sneak by... unlikely because we pick high conf anyways) if keep_inds_mask.sum() < topk: num_to_add = topk - keep_inds_mask.sum() pad_inds = (~keep_inds_mask).nonzero()[:num_to_add] keep_inds_mask[pad_inds] = True keep_inds_topk = keep_inds[keep_inds_mask] topk_proposals.append(keep_inds_topk) topk_proposals = torch.stack(topk_proposals) else: topk_proposals = torch.topk(enc_outputs_class[..., 0], topk, dim=1)[1] topk_coords_logits = torch.gather( enc_outputs_coord_logits, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4) ) topk_coords_logits = topk_coords_logits.detach() reference_points = topk_coords_logits.sigmoid() init_reference_points = reference_points pos_trans_out = self.pos_trans_norm(self.pos_trans(self.get_proposal_pos_embed(topk_coords_logits))) query_embed, target = torch.split(pos_trans_out, num_channels, dim=2) else: query_embed, target = torch.split(query_embeds, num_channels, dim=1) query_embed = query_embed.unsqueeze(0).expand(batch_size, -1, -1) target = target.unsqueeze(0).expand(batch_size, -1, -1) reference_points = self.reference_points(query_embed).sigmoid() init_reference_points = reference_points decoder_outputs = self.decoder( inputs_embeds=target, position_embeddings=query_embed, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=mask_flatten, reference_points=reference_points, spatial_shapes=spatial_shapes, level_start_index=level_start_index, valid_ratios=valid_ratios, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: enc_outputs = tuple(value for value in [enc_outputs_class, enc_outputs_coord_logits] if value is not None) tuple_outputs = (init_reference_points,) + decoder_outputs + encoder_outputs + enc_outputs return tuple_outputs return DetaModelOutput( 
init_reference_points=init_reference_points, last_hidden_state=decoder_outputs.last_hidden_state, intermediate_hidden_states=decoder_outputs.intermediate_hidden_states, intermediate_reference_points=decoder_outputs.intermediate_reference_points, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, enc_outputs_class=enc_outputs_class, enc_outputs_coord_logits=enc_outputs_coord_logits, ) @add_start_docstrings( """ DETA Model (consisting of a backbone and encoder-decoder Transformer) with object detection heads on top, for tasks such as COCO detection. """, DETA_START_DOCSTRING, ) class DetaForObjectDetection(DetaPreTrainedModel): # When using clones, all layers > 0 will be clones, but layer 0 *is* required _keys_to_ignore_on_load_missing = ["bbox_embed\.[1-9]\d*", "class_embed\.[1-9]\d*"] # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrForObjectDetection.__init__ with DeformableDetr->Deta def __init__(self, config: DetaConfig): super().__init__(config) # Deformable DETR encoder-decoder model self.model = DetaModel(config) # Detection heads on top self.class_embed = nn.Linear(config.d_model, config.num_labels) self.bbox_embed = DetaMLPPredictionHead( input_dim=config.d_model, hidden_dim=config.d_model, output_dim=4, num_layers=3 ) prior_prob = 0.01 bias_value = -math.log((1 - prior_prob) / prior_prob) self.class_embed.bias.data = torch.ones(config.num_labels) * bias_value nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0) nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0) # if two-stage, the last class_embed and bbox_embed is for region proposal generation num_pred = (config.decoder_layers + 1) if config.two_stage else config.decoder_layers if config.with_box_refine: self.class_embed = _get_clones(self.class_embed, num_pred) self.bbox_embed = _get_clones(self.bbox_embed, num_pred) nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0) # hack implementation for iterative bounding box refinement self.model.decoder.bbox_embed = self.bbox_embed else: nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0) self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)]) self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)]) self.model.decoder.bbox_embed = None if config.two_stage: # hack implementation for two-stage self.model.decoder.class_embed = self.class_embed for box_embed in self.bbox_embed: nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0) # Initialize weights and apply final processing self.post_init() @torch.jit.unused # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrForObjectDetection._set_aux_loss def _set_aux_loss(self, outputs_class, outputs_coord): # this is a workaround to make torchscript happy, as torchscript # doesn't support dictionary with non-homogeneous values, such # as a dict having both a Tensor and a list. 
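# Illustrative sketch (not part of the model): the "prior probability" bias initialization used
# above for the classification head. Setting the bias to -log((1 - p) / p) makes every class
# start with a sigmoid score of roughly `prior_prob`, which keeps the focal loss stable early
# in training. The default num_labels below is only an example value.
def _demo_prior_prob_bias(num_labels=91, prior_prob=0.01):
    import math

    import torch

    bias_value = -math.log((1 - prior_prob) / prior_prob)
    bias = torch.ones(num_labels) * bias_value
    # sanity check: the sigmoid of the bias recovers the prior probability
    assert torch.allclose(torch.sigmoid(bias), torch.full((num_labels,), prior_prob), atol=1e-6)
    return bias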
return [{"logits": a, "pred_boxes": b} for a, b in zip(outputs_class[:-1], outputs_coord[:-1])] @add_start_docstrings_to_model_forward(DETA_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=DetaObjectDetectionOutput, config_class=_CONFIG_FOR_DOC) def forward( self, pixel_values, pixel_mask=None, decoder_attention_mask=None, encoder_outputs=None, inputs_embeds=None, decoder_inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (`List[Dict]` of len `(batch_size,)`, *optional*): Labels for computing the bipartite matching loss. List of dicts, each dictionary containing at least the following 2 keys: 'class_labels' and 'boxes' (the class labels and bounding boxes of an image in the batch respectively). The class labels themselves should be a `torch.LongTensor` of len `(number of bounding boxes in the image,)` and the boxes a `torch.FloatTensor` of shape `(number of bounding boxes in the image, 4)`. Returns: Examples: ```python >>> from transformers import AutoImageProcessor, DetaForObjectDetection >>> from PIL import Image >>> import requests >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> image_processor = AutoImageProcessor.from_pretrained("jozhang97/deta-swin-large") >>> model = DetaForObjectDetection.from_pretrained("jozhang97/deta-swin-large") >>> inputs = image_processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> # convert outputs (bounding boxes and class logits) to COCO API >>> target_sizes = torch.tensor([image.size[::-1]]) >>> results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[ ... 0 ... ] >>> for score, label, box in zip(results["scores"], results["labels"], results["boxes"]): ... box = [round(i, 2) for i in box.tolist()] ... print( ... f"Detected {model.config.id2label[label.item()]} with confidence " ... f"{round(score.item(), 3)} at location {box}" ... 
) Detected cat with confidence 0.683 at location [345.85, 23.68, 639.86, 372.83] Detected cat with confidence 0.683 at location [8.8, 52.49, 316.93, 473.45] Detected remote with confidence 0.568 at location [40.02, 73.75, 175.96, 117.33] Detected remote with confidence 0.546 at location [333.68, 77.13, 370.12, 187.51] ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict # First, sent images through DETR base model to obtain encoder + decoder outputs outputs = self.model( pixel_values, pixel_mask=pixel_mask, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs.intermediate_hidden_states if return_dict else outputs[2] init_reference = outputs.init_reference_points if return_dict else outputs[0] inter_references = outputs.intermediate_reference_points if return_dict else outputs[3] # class logits + predicted bounding boxes outputs_classes = [] outputs_coords = [] for level in range(hidden_states.shape[1]): if level == 0: reference = init_reference else: reference = inter_references[:, level - 1] reference = inverse_sigmoid(reference) outputs_class = self.class_embed[level](hidden_states[:, level]) delta_bbox = self.bbox_embed[level](hidden_states[:, level]) if reference.shape[-1] == 4: outputs_coord_logits = delta_bbox + reference elif reference.shape[-1] == 2: delta_bbox[..., :2] += reference outputs_coord_logits = delta_bbox else: raise ValueError(f"reference.shape[-1] should be 4 or 2, but got {reference.shape[-1]}") outputs_coord = outputs_coord_logits.sigmoid() outputs_classes.append(outputs_class) outputs_coords.append(outputs_coord) # Keep batch_size as first dimension outputs_class = torch.stack(outputs_classes, dim=1) outputs_coord = torch.stack(outputs_coords, dim=1) logits = outputs_class[:, -1] pred_boxes = outputs_coord[:, -1] loss, loss_dict, auxiliary_outputs = None, None, None if labels is not None: # First: create the matcher matcher = DetaHungarianMatcher( class_cost=self.config.class_cost, bbox_cost=self.config.bbox_cost, giou_cost=self.config.giou_cost ) # Second: create the criterion losses = ["labels", "boxes", "cardinality"] criterion = DetaLoss( matcher=matcher, num_classes=self.config.num_labels, focal_alpha=self.config.focal_alpha, losses=losses, num_queries=self.config.num_queries, ) criterion.to(logits.device) # Third: compute the losses, based on outputs and labels outputs_loss = {} outputs_loss["logits"] = logits outputs_loss["pred_boxes"] = pred_boxes if self.config.auxiliary_loss: intermediate = outputs.intermediate_hidden_states if return_dict else outputs[4] outputs_class = self.class_embed(intermediate) outputs_coord = self.bbox_embed(intermediate).sigmoid() auxiliary_outputs = self._set_aux_loss(outputs_class, outputs_coord) outputs_loss["auxiliary_outputs"] = auxiliary_outputs if self.config.two_stage: enc_outputs_coord = outputs.enc_outputs_coord_logits.sigmoid() outputs["enc_outputs"] = {"pred_logits": outputs.enc_outputs_class, "pred_boxes": enc_outputs_coord} loss_dict = criterion(outputs_loss, labels) # Fourth: compute total loss, as a weighted sum of the various losses weight_dict = {"loss_ce": 1, "loss_bbox": self.config.bbox_loss_coefficient} weight_dict["loss_giou"] = self.config.giou_loss_coefficient if self.config.auxiliary_loss: aux_weight_dict = {} for i in 
range(self.config.decoder_layers - 1): aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()}) weight_dict.update(aux_weight_dict) loss = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) if not return_dict: if auxiliary_outputs is not None: output = (logits, pred_boxes) + auxiliary_outputs + outputs else: output = (logits, pred_boxes) + outputs tuple_outputs = ((loss, loss_dict) + output) if loss is not None else output return tuple_outputs dict_outputs = DetaObjectDetectionOutput( loss=loss, loss_dict=loss_dict, logits=logits, pred_boxes=pred_boxes, auxiliary_outputs=auxiliary_outputs, last_hidden_state=outputs.last_hidden_state, decoder_hidden_states=outputs.decoder_hidden_states, decoder_attentions=outputs.decoder_attentions, cross_attentions=outputs.cross_attentions, encoder_last_hidden_state=outputs.encoder_last_hidden_state, encoder_hidden_states=outputs.encoder_hidden_states, encoder_attentions=outputs.encoder_attentions, intermediate_hidden_states=outputs.intermediate_hidden_states, intermediate_reference_points=outputs.intermediate_reference_points, init_reference_points=outputs.init_reference_points, enc_outputs_class=outputs.enc_outputs_class, enc_outputs_coord_logits=outputs.enc_outputs_coord_logits, ) return dict_outputs # Copied from transformers.models.detr.modeling_detr.dice_loss def dice_loss(inputs, targets, num_boxes): """ Compute the DICE loss, similar to generalized IOU for masks Args: inputs: A float tensor of arbitrary shape. The predictions for each example. targets: A float tensor with the same shape as inputs. Stores the binary classification label for each element in inputs (0 for the negative class and 1 for the positive class). """ inputs = inputs.sigmoid() inputs = inputs.flatten(1) numerator = 2 * (inputs * targets).sum(1) denominator = inputs.sum(-1) + targets.sum(-1) loss = 1 - (numerator + 1) / (denominator + 1) return loss.sum() / num_boxes # Copied from transformers.models.detr.modeling_detr.sigmoid_focal_loss def sigmoid_focal_loss(inputs, targets, num_boxes, alpha: float = 0.25, gamma: float = 2): """ Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. Args: inputs (`torch.FloatTensor` of arbitrary shape): The predictions for each example. targets (`torch.FloatTensor` with the same shape as `inputs`) A tensor storing the binary classification label for each element in the `inputs` (0 for the negative class and 1 for the positive class). alpha (`float`, *optional*, defaults to `0.25`): Optional weighting factor in the range (0,1) to balance positive vs. negative examples. gamma (`int`, *optional*, defaults to `2`): Exponent of the modulating factor (1 - p_t) to balance easy vs hard examples. Returns: Loss tensor """ prob = inputs.sigmoid() ce_loss = nn.functional.binary_cross_entropy_with_logits(inputs, targets, reduction="none") # add modulating factor p_t = prob * targets + (1 - prob) * (1 - targets) loss = ce_loss * ((1 - p_t) ** gamma) if alpha >= 0: alpha_t = alpha * targets + (1 - alpha) * (1 - targets) loss = alpha_t * loss return loss.mean(1).sum() / num_boxes class DetaLoss(nn.Module): """ This class computes the losses for `DetaForObjectDetection`. The process happens in two steps: 1) we compute hungarian assignment between ground truth boxes and the outputs of the model 2) we supervise each pair of matched ground-truth / prediction (supervised class and box). Args: matcher (`DetaHungarianMatcher`): Module able to compute a matching between targets and proposals. 
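# Illustrative sketch (not part of the model): calling the `sigmoid_focal_loss` defined above on
# toy logits and one-hot targets. Shapes and values are made up; in the criterion the targets
# come from the Hungarian matching.
def _demo_sigmoid_focal_loss():
    import torch

    logits = torch.randn(2, 5, 3)        # (batch_size, num_queries, num_classes)
    targets = torch.zeros_like(logits)
    targets[0, 0, 1] = 1.0               # one matched query of class 1
    num_boxes = 1
    return sigmoid_focal_loss(logits, targets, num_boxes, alpha=0.25, gamma=2)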
num_classes (`int`): Number of object categories, omitting the special no-object category. focal_alpha (`float`): Alpha parameter in focal loss. losses (`List[str]`): List of all the losses to be applied. See `get_loss` for a list of all available losses. """ def __init__( self, matcher, num_classes, focal_alpha, losses, num_queries, assign_first_stage=False, assign_second_stage=False, ): super().__init__() self.matcher = matcher self.num_classes = num_classes self.focal_alpha = focal_alpha self.losses = losses self.assign_first_stage = assign_first_stage self.assign_second_stage = assign_second_stage if self.assign_first_stage: self.stg1_assigner = DetaStage1Assigner() if self.assign_second_stage: self.stg2_assigner = DetaStage2Assigner(num_queries) # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.loss_labels def loss_labels(self, outputs, targets, indices, num_boxes): """ Classification loss (Binary focal loss) targets dicts must contain the key "class_labels" containing a tensor of dim [nb_target_boxes] """ if "logits" not in outputs: raise KeyError("No logits were found in the outputs") source_logits = outputs["logits"] idx = self._get_source_permutation_idx(indices) target_classes_o = torch.cat([t["class_labels"][J] for t, (_, J) in zip(targets, indices)]) target_classes = torch.full( source_logits.shape[:2], self.num_classes, dtype=torch.int64, device=source_logits.device ) target_classes[idx] = target_classes_o target_classes_onehot = torch.zeros( [source_logits.shape[0], source_logits.shape[1], source_logits.shape[2] + 1], dtype=source_logits.dtype, layout=source_logits.layout, device=source_logits.device, ) target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1) target_classes_onehot = target_classes_onehot[:, :, :-1] loss_ce = ( sigmoid_focal_loss(source_logits, target_classes_onehot, num_boxes, alpha=self.focal_alpha, gamma=2) * source_logits.shape[1] ) losses = {"loss_ce": loss_ce} return losses @torch.no_grad() # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.loss_cardinality def loss_cardinality(self, outputs, targets, indices, num_boxes): """ Compute the cardinality error, i.e. the absolute error in the number of predicted non-empty boxes. This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients. """ logits = outputs["logits"] device = logits.device target_lengths = torch.as_tensor([len(v["class_labels"]) for v in targets], device=device) # Count the number of predictions that are NOT "no-object" (which is the last class) card_pred = (logits.argmax(-1) != logits.shape[-1] - 1).sum(1) card_err = nn.functional.l1_loss(card_pred.float(), target_lengths.float()) losses = {"cardinality_error": card_err} return losses # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.loss_boxes def loss_boxes(self, outputs, targets, indices, num_boxes): """ Compute the losses related to the bounding boxes, the L1 regression loss and the GIoU loss. Targets dicts must contain the key "boxes" containing a tensor of dim [nb_target_boxes, 4]. The target boxes are expected in format (center_x, center_y, w, h), normalized by the image size. 
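# Illustrative sketch (not part of the model): the scatter_-based one-hot encoding used by
# `loss_labels` above, with a trailing "no object" column that is dropped before the focal loss.
# Shapes are toy values.
def _demo_onehot_targets():
    import torch

    num_classes = 3
    target_classes = torch.tensor([[1, 3, 0]])            # 3 is the "no object" index
    onehot = torch.zeros(1, 3, num_classes + 1)
    onehot.scatter_(2, target_classes.unsqueeze(-1), 1)
    return onehot[:, :, :-1]                              # drop the "no object" column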
""" if "pred_boxes" not in outputs: raise KeyError("No predicted boxes found in outputs") idx = self._get_source_permutation_idx(indices) source_boxes = outputs["pred_boxes"][idx] target_boxes = torch.cat([t["boxes"][i] for t, (_, i) in zip(targets, indices)], dim=0) loss_bbox = nn.functional.l1_loss(source_boxes, target_boxes, reduction="none") losses = {} losses["loss_bbox"] = loss_bbox.sum() / num_boxes loss_giou = 1 - torch.diag( generalized_box_iou(center_to_corners_format(source_boxes), center_to_corners_format(target_boxes)) ) losses["loss_giou"] = loss_giou.sum() / num_boxes return losses # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss._get_source_permutation_idx def _get_source_permutation_idx(self, indices): # permute predictions following indices batch_idx = torch.cat([torch.full_like(source, i) for i, (source, _) in enumerate(indices)]) source_idx = torch.cat([source for (source, _) in indices]) return batch_idx, source_idx # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss._get_target_permutation_idx def _get_target_permutation_idx(self, indices): # permute targets following indices batch_idx = torch.cat([torch.full_like(target, i) for i, (_, target) in enumerate(indices)]) target_idx = torch.cat([target for (_, target) in indices]) return batch_idx, target_idx # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrLoss.get_loss def get_loss(self, loss, outputs, targets, indices, num_boxes): loss_map = { "labels": self.loss_labels, "cardinality": self.loss_cardinality, "boxes": self.loss_boxes, } if loss not in loss_map: raise ValueError(f"Loss {loss} not supported") return loss_map[loss](outputs, targets, indices, num_boxes) def forward(self, outputs, targets): """ This performs the loss computation. Args: outputs (`dict`, *optional*): Dictionary of tensors, see the output specification of the model for the format. targets (`List[dict]`, *optional*): List of dicts, such that `len(targets) == batch_size`. The expected keys in each dict depends on the losses applied, see each loss' doc. """ outputs_without_aux = {k: v for k, v in outputs.items() if k != "auxiliary_outputs"} # Retrieve the matching between the outputs of the last layer and the targets if self.assign_second_stage: indices = self.stg2_assigner(outputs_without_aux, targets) else: indices = self.matcher(outputs_without_aux, targets) # Compute the average number of target boxes accross all nodes, for normalization purposes num_boxes = sum(len(t["class_labels"]) for t in targets) num_boxes = torch.as_tensor([num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device) # (Niels): comment out function below, distributed training to be added # if is_dist_avail_and_initialized(): # torch.distributed.all_reduce(num_boxes) # (Niels) in original implementation, num_boxes is divided by get_world_size() num_boxes = torch.clamp(num_boxes, min=1).item() # Compute all the requested losses losses = {} for loss in self.losses: losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes)) # In case of auxiliary losses, we repeat this process with the output of each intermediate layer. 
if "auxiliary_outputs" in outputs: for i, auxiliary_outputs in enumerate(outputs["auxiliary_outputs"]): if not self.assign_second_stage: indices = self.matcher(auxiliary_outputs, targets) for loss in self.losses: l_dict = self.get_loss(loss, auxiliary_outputs, targets, indices, num_boxes) l_dict = {k + f"_{i}": v for k, v in l_dict.items()} losses.update(l_dict) if "enc_outputs" in outputs: enc_outputs = outputs["enc_outputs"] bin_targets = copy.deepcopy(targets) for bt in bin_targets: bt["labels"] = torch.zeros_like(bt["labels"]) if self.assign_first_stage: indices = self.stg1_assigner(enc_outputs, bin_targets) else: indices = self.matcher(enc_outputs, bin_targets) for loss in self.losses: kwargs = {} if loss == "labels": # Logging is enabled only for the last layer kwargs["log"] = False l_dict = self.get_loss(loss, enc_outputs, bin_targets, indices, num_boxes, **kwargs) l_dict = {k + "_enc": v for k, v in l_dict.items()} losses.update(l_dict) return losses # Copied from transformers.models.detr.modeling_detr.DetrMLPPredictionHead class DetaMLPPredictionHead(nn.Module): """ Very simple multi-layer perceptron (MLP, also called FFN), used to predict the normalized center coordinates, height and width of a bounding box w.r.t. an image. Copied from https://github.com/facebookresearch/detr/blob/master/models/detr.py """ def __init__(self, input_dim, hidden_dim, output_dim, num_layers): super().__init__() self.num_layers = num_layers h = [hidden_dim] * (num_layers - 1) self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])) def forward(self, x): for i, layer in enumerate(self.layers): x = nn.functional.relu(layer(x)) if i < self.num_layers - 1 else layer(x) return x # Copied from transformers.models.deformable_detr.modeling_deformable_detr.DeformableDetrHungarianMatcher with DeformableDetr->Deta class DetaHungarianMatcher(nn.Module): """ This class computes an assignment between the targets and the predictions of the network. For efficiency reasons, the targets don't include the no_object. Because of this, in general, there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions, while the others are un-matched (and thus treated as non-objects). Args: class_cost: The relative weight of the classification error in the matching cost. bbox_cost: The relative weight of the L1 error of the bounding box coordinates in the matching cost. giou_cost: The relative weight of the giou loss of the bounding box in the matching cost. """ def __init__(self, class_cost: float = 1, bbox_cost: float = 1, giou_cost: float = 1): super().__init__() requires_backends(self, ["scipy"]) self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost if class_cost == 0 and bbox_cost == 0 and giou_cost == 0: raise ValueError("All costs of the Matcher can't be 0") @torch.no_grad() def forward(self, outputs, targets): """ Args: outputs (`dict`): A dictionary that contains at least these entries: * "logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits * "pred_boxes": Tensor of dim [batch_size, num_queries, 4] with the predicted box coordinates. 
targets (`List[dict]`): A list of targets (len(targets) = batch_size), where each target is a dict containing: * "class_labels": Tensor of dim [num_target_boxes] (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels * "boxes": Tensor of dim [num_target_boxes, 4] containing the target box coordinates. Returns: `List[Tuple]`: A list of size `batch_size`, containing tuples of (index_i, index_j) where: - index_i is the indices of the selected predictions (in order) - index_j is the indices of the corresponding selected targets (in order) For each batch element, it holds: len(index_i) = len(index_j) = min(num_queries, num_target_boxes) """ batch_size, num_queries = outputs["logits"].shape[:2] # We flatten to compute the cost matrices in a batch out_prob = outputs["logits"].flatten(0, 1).sigmoid() # [batch_size * num_queries, num_classes] out_bbox = outputs["pred_boxes"].flatten(0, 1) # [batch_size * num_queries, 4] # Also concat the target labels and boxes target_ids = torch.cat([v["class_labels"] for v in targets]) target_bbox = torch.cat([v["boxes"] for v in targets]) # Compute the classification cost. alpha = 0.25 gamma = 2.0 neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log()) pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log()) class_cost = pos_cost_class[:, target_ids] - neg_cost_class[:, target_ids] # Compute the L1 cost between boxes bbox_cost = torch.cdist(out_bbox, target_bbox, p=1) # Compute the giou cost between boxes giou_cost = -generalized_box_iou(center_to_corners_format(out_bbox), center_to_corners_format(target_bbox)) # Final cost matrix cost_matrix = self.bbox_cost * bbox_cost + self.class_cost * class_cost + self.giou_cost * giou_cost cost_matrix = cost_matrix.view(batch_size, num_queries, -1).cpu() sizes = [len(v["boxes"]) for v in targets] indices = [linear_sum_assignment(c[i]) for i, c in enumerate(cost_matrix.split(sizes, -1))] return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices] # Copied from transformers.models.detr.modeling_detr._upcast def _upcast(t: Tensor) -> Tensor: # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type if t.is_floating_point(): return t if t.dtype in (torch.float32, torch.float64) else t.float() else: return t if t.dtype in (torch.int32, torch.int64) else t.int() # Copied from transformers.models.detr.modeling_detr.box_area def box_area(boxes: Tensor) -> Tensor: """ Computes the area of a set of bounding boxes, which are specified by its (x1, y1, x2, y2) coordinates. Args: boxes (`torch.FloatTensor` of shape `(number_of_boxes, 4)`): Boxes for which the area will be computed. They are expected to be in (x1, y1, x2, y2) format with `0 <= x1 < x2` and `0 <= y1 < y2`. Returns: `torch.FloatTensor`: a tensor containing the area for each box. 
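# Illustrative sketch (not part of the model): the Hungarian assignment at the heart of the
# matcher above, on a tiny hand-written cost matrix for one image (3 queries, 2 targets).
# Lower cost means a better match.
def _demo_hungarian_assignment():
    import numpy as np
    from scipy.optimize import linear_sum_assignment

    cost = np.array(
        [
            [0.9, 0.1],   # query 0 vs target 0 / target 1
            [0.2, 0.8],   # query 1
            [0.7, 0.6],   # query 2
        ]
    )
    query_indices, target_indices = linear_sum_assignment(cost)
    # query 0 -> target 1, query 1 -> target 0; query 2 stays unmatched
    return query_indices, target_indices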
""" boxes = _upcast(boxes) return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) # Copied from transformers.models.detr.modeling_detr.box_iou def box_iou(boxes1, boxes2): area1 = box_area(boxes1) area2 = box_area(boxes2) left_top = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] right_bottom = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] width_height = (right_bottom - left_top).clamp(min=0) # [N,M,2] inter = width_height[:, :, 0] * width_height[:, :, 1] # [N,M] union = area1[:, None] + area2 - inter iou = inter / union return iou, union # Copied from transformers.models.detr.modeling_detr.generalized_box_iou def generalized_box_iou(boxes1, boxes2): """ Generalized IoU from https://giou.stanford.edu/. The boxes should be in [x0, y0, x1, y1] (corner) format. Returns: `torch.FloatTensor`: a [N, M] pairwise matrix, where N = len(boxes1) and M = len(boxes2) """ # degenerate boxes gives inf / nan results # so do an early check if not (boxes1[:, 2:] >= boxes1[:, :2]).all(): raise ValueError(f"boxes1 must be in [x0, y0, x1, y1] (corner) format, but got {boxes1}") if not (boxes2[:, 2:] >= boxes2[:, :2]).all(): raise ValueError(f"boxes2 must be in [x0, y0, x1, y1] (corner) format, but got {boxes2}") iou, union = box_iou(boxes1, boxes2) top_left = torch.min(boxes1[:, None, :2], boxes2[:, :2]) bottom_right = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) width_height = (bottom_right - top_left).clamp(min=0) # [N,M,2] area = width_height[:, :, 0] * width_height[:, :, 1] return iou - (area - union) / area # from https://github.com/facebookresearch/detectron2/blob/cbbc1ce26473cb2a5cc8f58e8ada9ae14cb41052/detectron2/layers/wrappers.py#L100 def nonzero_tuple(x): """ A 'as_tuple=True' version of torch.nonzero to support torchscript. because of https://github.com/pytorch/pytorch/issues/38718 """ if torch.jit.is_scripting(): if x.dim() == 0: return x.unsqueeze(0).nonzero().unbind(1) return x.nonzero().unbind(1) else: return x.nonzero(as_tuple=True) # from https://github.com/facebookresearch/detectron2/blob/9921a2caa585d4fa66c4b534b6fab6e74d89b582/detectron2/modeling/matcher.py#L9 class DetaMatcher(object): """ This class assigns to each predicted "element" (e.g., a box) a ground-truth element. Each predicted element will have exactly zero or one matches; each ground-truth element may be matched to zero or more predicted elements. The matching is determined by the MxN match_quality_matrix, that characterizes how well each (ground-truth, prediction)-pair match each other. For example, if the elements are boxes, this matrix may contain box intersection-over-union overlap values. The matcher returns (a) a vector of length N containing the index of the ground-truth element m in [0, M) that matches to prediction n in [0, N). (b) a vector of length N containing the labels for each prediction. """ def __init__(self, thresholds: List[float], labels: List[int], allow_low_quality_matches: bool = False): """ Args: thresholds (`list[float]`): A list of thresholds used to stratify predictions into levels. labels (`list[int`): A list of values to label predictions belonging at each level. A label can be one of {-1, 0, 1} signifying {ignore, negative class, positive class}, respectively. allow_low_quality_matches (`bool`, *optional*, defaults to `False`): If `True`, produce additional matches for predictions with maximum match quality lower than high_threshold. See `set_low_quality_matches_` for more details. 
For example, thresholds = [0.3, 0.5] labels = [0, -1, 1] All predictions with iou < 0.3 will be marked with 0 and thus will be considered as false positives while training. All predictions with 0.3 <= iou < 0.5 will be marked with -1 and thus will be ignored. All predictions with 0.5 <= iou will be marked with 1 and thus will be considered as true positives. """ # Add -inf and +inf to first and last position in thresholds thresholds = thresholds[:] if thresholds[0] < 0: raise ValueError("Thresholds should be positive") thresholds.insert(0, -float("inf")) thresholds.append(float("inf")) # Currently torchscript does not support all + generator if not all([low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:])]): raise ValueError("Thresholds should be sorted.") if not all([l in [-1, 0, 1] for l in labels]): raise ValueError("All labels should be either -1, 0 or 1") if len(labels) != len(thresholds) - 1: raise ValueError("Number of labels should be equal to number of thresholds - 1") self.thresholds = thresholds self.labels = labels self.allow_low_quality_matches = allow_low_quality_matches def __call__(self, match_quality_matrix): """ Args: match_quality_matrix (Tensor[float]): an MxN tensor, containing the pairwise quality between M ground-truth elements and N predicted elements. All elements must be >= 0 (due to the us of `torch.nonzero` for selecting indices in `set_low_quality_matches_`). Returns: matches (Tensor[int64]): a vector of length N, where matches[i] is a matched ground-truth index in [0, M) match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates whether a prediction is a true or false positive or ignored """ assert match_quality_matrix.dim() == 2 if match_quality_matrix.numel() == 0: default_matches = match_quality_matrix.new_full((match_quality_matrix.size(1),), 0, dtype=torch.int64) # When no gt boxes exist, we define IOU = 0 and therefore set labels # to `self.labels[0]`, which usually defaults to background class 0 # To choose to ignore instead, can make labels=[-1,0,-1,1] + set appropriate thresholds default_match_labels = match_quality_matrix.new_full( (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8 ) return default_matches, default_match_labels assert torch.all(match_quality_matrix >= 0) # match_quality_matrix is M (gt) x N (predicted) # Max over gt elements (dim 0) to find best gt candidate for each prediction matched_vals, matches = match_quality_matrix.max(dim=0) match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8) for l, low, high in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]): low_high = (matched_vals >= low) & (matched_vals < high) match_labels[low_high] = l if self.allow_low_quality_matches: self.set_low_quality_matches_(match_labels, match_quality_matrix) return matches, match_labels def set_low_quality_matches_(self, match_labels, match_quality_matrix): """ Produce additional matches for predictions that have only low-quality matches. Specifically, for each ground-truth G find the set of predictions that have maximum overlap with it (including ties); for each prediction in that set, if it is unmatched, then match it to the ground-truth G. This function implements the RPN assignment case (i) in Sec. 3.1.2 of :paper:`Faster R-CNN`. """ # For each gt, find the prediction with which it has highest quality highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1) # Find the highest quality match available, even if it is low, including ties. 
# Note that the match qualities must be positive due to the use of # `torch.nonzero`. _, pred_inds_with_highest_quality = nonzero_tuple(match_quality_matrix == highest_quality_foreach_gt[:, None]) # If an anchor was labeled positive only due to a low-quality match # with gt_A, but it has larger overlap with gt_B, its matched index will still be gt_B. # This follows the implementation in Detectron, and is found to have no significant impact. match_labels[pred_inds_with_highest_quality] = 1 # from https://github.com/facebookresearch/detectron2/blob/cbbc1ce26473cb2a5cc8f58e8ada9ae14cb41052/detectron2/modeling/sampling.py#L9 def subsample_labels(labels: torch.Tensor, num_samples: int, positive_fraction: float, bg_label: int): """ Return `num_samples` (or fewer, if not enough found) random samples from `labels` which is a mixture of positives & negatives. It will try to return as many positives as possible without exceeding `positive_fraction * num_samples`, and then try to fill the remaining slots with negatives. Args: labels (Tensor): (N, ) label vector with values: * -1: ignore * bg_label: background ("negative") class * otherwise: one or more foreground ("positive") classes num_samples (int): The total number of labels with value >= 0 to return. Values that are not sampled will be filled with -1 (ignore). positive_fraction (float): The number of subsampled labels with values > 0 is `min(num_positives, int(positive_fraction * num_samples))`. The number of negatives sampled is `min(num_negatives, num_samples - num_positives_sampled)`. In other words, if there are not enough positives, the sample is filled with negatives. If there are also not enough negatives, then as many elements are sampled as is possible. bg_label (int): label index of background ("negative") class. Returns: pos_idx, neg_idx (Tensor): 1D vector of indices. The total length of both is `num_samples` or fewer. 
""" positive = nonzero_tuple((labels != -1) & (labels != bg_label))[0] negative = nonzero_tuple(labels == bg_label)[0] num_pos = int(num_samples * positive_fraction) # protect against not enough positive examples num_pos = min(positive.numel(), num_pos) num_neg = num_samples - num_pos # protect against not enough negative examples num_neg = min(negative.numel(), num_neg) # randomly select positive and negative examples perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos] perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg] pos_idx = positive[perm1] neg_idx = negative[perm2] return pos_idx, neg_idx def sample_topk_per_gt(pr_inds, gt_inds, iou, k): if len(gt_inds) == 0: return pr_inds, gt_inds # find topk matches for each gt gt_inds2, counts = gt_inds.unique(return_counts=True) scores, pr_inds2 = iou[gt_inds2].topk(k, dim=1) gt_inds2 = gt_inds2[:, None].repeat(1, k) # filter to as many matches that gt has pr_inds3 = torch.cat([pr[:c] for c, pr in zip(counts, pr_inds2)]) gt_inds3 = torch.cat([gt[:c] for c, gt in zip(counts, gt_inds2)]) return pr_inds3, gt_inds3 # modified from https://github.com/facebookresearch/detectron2/blob/cbbc1ce26473cb2a5cc8f58e8ada9ae14cb41052/detectron2/modeling/roi_heads/roi_heads.py#L123 class DetaStage2Assigner(nn.Module): def __init__(self, num_queries, max_k=4): super().__init__() self.positive_fraction = 0.25 self.bg_label = 400 # number > 91 to filter out later self.batch_size_per_image = num_queries self.proposal_matcher = DetaMatcher(thresholds=[0.6], labels=[0, 1], allow_low_quality_matches=True) self.k = max_k def _sample_proposals(self, matched_idxs: torch.Tensor, matched_labels: torch.Tensor, gt_classes: torch.Tensor): """ Based on the matching between N proposals and M groundtruth, sample the proposals and set their classification labels. Args: matched_idxs (Tensor): a vector of length N, each is the best-matched gt index in [0, M) for each proposal. matched_labels (Tensor): a vector of length N, the matcher's label (one of cfg.MODEL.ROI_HEADS.IOU_LABELS) for each proposal. gt_classes (Tensor): a vector of length M. Returns: Tensor: a vector of indices of sampled proposals. Each is in [0, N). Tensor: a vector of the same length, the classification label for each sampled proposal. Each sample is labeled as either a category in [0, num_classes) or the background (num_classes). """ has_gt = gt_classes.numel() > 0 # Get the corresponding GT for each proposal if has_gt: gt_classes = gt_classes[matched_idxs] # Label unmatched proposals (0 label from matcher) as background (label=num_classes) gt_classes[matched_labels == 0] = self.bg_label # Label ignore proposals (-1 label) gt_classes[matched_labels == -1] = -1 else: gt_classes = torch.zeros_like(matched_idxs) + self.bg_label sampled_fg_idxs, sampled_bg_idxs = subsample_labels( gt_classes, self.batch_size_per_image, self.positive_fraction, self.bg_label ) sampled_idxs = torch.cat([sampled_fg_idxs, sampled_bg_idxs], dim=0) return sampled_idxs, gt_classes[sampled_idxs] def forward(self, outputs, targets, return_cost_matrix=False): # COCO categories are from 1 to 90. They set num_classes=91 and apply sigmoid. 
bs = len(targets) indices = [] ious = [] for b in range(bs): iou, _ = box_iou( center_to_corners_format(targets[b]["boxes"]), center_to_corners_format(outputs["init_reference"][b].detach()), ) matched_idxs, matched_labels = self.proposal_matcher( iou ) # proposal_id -> highest_iou_gt_id, proposal_id -> [1 if iou > 0.6, 0 ow] ( sampled_idxs, sampled_gt_classes, ) = self._sample_proposals( # list of sampled proposal_ids, sampled_id -> [0, num_classes)+[bg_label] matched_idxs, matched_labels, targets[b]["labels"] ) pos_pr_inds = sampled_idxs[sampled_gt_classes != self.bg_label] pos_gt_inds = matched_idxs[pos_pr_inds] pos_pr_inds, pos_gt_inds = self.postprocess_indices(pos_pr_inds, pos_gt_inds, iou) indices.append((pos_pr_inds, pos_gt_inds)) ious.append(iou) if return_cost_matrix: return indices, ious return indices def postprocess_indices(self, pr_inds, gt_inds, iou): return sample_topk_per_gt(pr_inds, gt_inds, iou, self.k) # modified from https://github.com/facebookresearch/detectron2/blob/cbbc1ce26473cb2a5cc8f58e8ada9ae14cb41052/detectron2/modeling/proposal_generator/rpn.py#L181 class DetaStage1Assigner(nn.Module): def __init__(self, t_low=0.3, t_high=0.7, max_k=4): super().__init__() self.positive_fraction = 0.5 self.batch_size_per_image = 256 self.k = max_k self.t_low = t_low self.t_high = t_high self.anchor_matcher = DetaMatcher( thresholds=[t_low, t_high], labels=[0, -1, 1], allow_low_quality_matches=True ) def _subsample_labels(self, label): """ Randomly sample a subset of positive and negative examples, and overwrite the label vector to the ignore value (-1) for all elements that are not included in the sample. Args: labels (Tensor): a vector of -1, 0, 1. Will be modified in-place and returned. """ pos_idx, neg_idx = subsample_labels(label, self.batch_size_per_image, self.positive_fraction, 0) # Fill with the ignore label (-1), then set positive and negative labels label.fill_(-1) label.scatter_(0, pos_idx, 1) label.scatter_(0, neg_idx, 0) return label def forward(self, outputs, targets): bs = len(targets) indices = [] for b in range(bs): anchors = outputs["anchors"][b] if len(targets[b]["boxes"]) == 0: indices.append( ( torch.tensor([], dtype=torch.long, device=anchors.device), torch.tensor([], dtype=torch.long, device=anchors.device), ) ) continue iou, _ = box_iou( center_to_corners_format(targets[b]["boxes"]), center_to_corners_format(anchors), ) matched_idxs, matched_labels = self.anchor_matcher( iou ) # proposal_id -> highest_iou_gt_id, proposal_id -> [1 if iou > 0.7, 0 if iou < 0.3, -1 ow] matched_labels = self._subsample_labels(matched_labels) all_pr_inds = torch.arange(len(anchors)) pos_pr_inds = all_pr_inds[matched_labels == 1] pos_gt_inds = matched_idxs[pos_pr_inds] pos_pr_inds, pos_gt_inds = self.postprocess_indices(pos_pr_inds, pos_gt_inds, iou) pos_pr_inds, pos_gt_inds = pos_pr_inds.to(anchors.device), pos_gt_inds.to(anchors.device) indices.append((pos_pr_inds, pos_gt_inds)) return indices def postprocess_indices(self, pr_inds, gt_inds, iou): return sample_topk_per_gt(pr_inds, gt_inds, iou, self.k)
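# Illustrative sketch (not part of the original module): how the `DetaMatcher` defined above bins each
# prediction by its best IoU against the ground truth. The thresholds/labels mirror `DetaStage1Assigner`;
# the toy tensor values below are assumptions made up for the example.
toy_matcher = DetaMatcher(thresholds=[0.3, 0.7], labels=[0, -1, 1], allow_low_quality_matches=True)
toy_iou = torch.tensor(
    [[0.1, 0.4, 0.8, 0.0], [0.2, 0.6, 0.1, 0.9]]  # 2 ground-truth boxes x 4 predictions
)
toy_matches, toy_match_labels = toy_matcher(toy_iou)
# toy_matches      -> tensor([1, 1, 0, 1]): index of the best-matching ground-truth box per prediction
# toy_match_labels -> tensor([0, -1, 1, 1], dtype=torch.int8): negative / ignored / positive per prediction
#                     (prediction 3 stays positive because it is the best match for ground-truth box 1)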
1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/mask2former/image_processing_mask2former.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Mask2Former.""" import math import warnings from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import numpy as np from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from transformers.image_transforms import ( PaddingMode, get_resize_output_image_size, normalize, pad, rescale, resize, to_channel_dimension_format, to_numpy_array, ) from transformers.image_utils import ( ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_batched, valid_images, ) from transformers.utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, TensorType, is_torch_available, is_torch_tensor, logging, ) logger = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn # Copied from transformers.models.detr.image_processing_detr.max_across_indices def max_across_indices(values: Iterable[Any]) -> List[Any]: """ Return the maximum value across all indices of an iterable of values. """ return [max(values_i) for values_i in zip(*values)] # Copied from transformers.models.detr.image_processing_detr.get_max_height_width def get_max_height_width(images: List[np.ndarray]) -> List[int]: """ Get the maximum height and width across all images in a batch. """ input_channel_dimension = infer_channel_dimension_format(images[0]) if input_channel_dimension == ChannelDimension.FIRST: _, max_height, max_width = max_across_indices([img.shape for img in images]) elif input_channel_dimension == ChannelDimension.LAST: max_height, max_width, _ = max_across_indices([img.shape for img in images]) else: raise ValueError(f"Invalid channel dimension format: {input_channel_dimension}") return (max_height, max_width) # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int]) -> np.ndarray: """ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. Args: image (`np.ndarray`): Image to make the pixel mask for. output_size (`Tuple[int, int]`): Output size of the mask. """ input_height, input_width = get_image_size(image) mask = np.zeros(output_size, dtype=np.int64) mask[:input_height, :input_width] = 1 return mask # Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle def binary_mask_to_rle(mask): """ Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format. Args: mask (`torch.Tensor` or `numpy.array`): A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target segment_id or class_id. Returns: `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE format. 
""" if is_torch_tensor(mask): mask = mask.numpy() pixels = mask.flatten() pixels = np.concatenate([[0], pixels, [0]]) runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 runs[1::2] -= runs[::2] return list(runs) # Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle def convert_segmentation_to_rle(segmentation): """ Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format. Args: segmentation (`torch.Tensor` or `numpy.array`): A segmentation map of shape `(height, width)` where each value denotes a segment or class id. Returns: `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id. """ segment_ids = torch.unique(segmentation) run_length_encodings = [] for idx in segment_ids: mask = torch.where(segmentation == idx, 1, 0) rle = binary_mask_to_rle(mask) run_length_encodings.append(rle) return run_length_encodings # Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): """ Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and `labels`. Args: masks (`torch.Tensor`): A tensor of shape `(num_queries, height, width)`. scores (`torch.Tensor`): A tensor of shape `(num_queries)`. labels (`torch.Tensor`): A tensor of shape `(num_queries)`. object_mask_threshold (`float`): A number between 0 and 1 used to binarize the masks. Raises: `ValueError`: Raised when the first dimension doesn't match in all input tensors. Returns: `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region < `object_mask_threshold`. """ if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): raise ValueError("mask, scores and labels must have the same shape!") to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) return masks[to_keep], scores[to_keep], labels[to_keep] # Copied from transformers.models.detr.image_processing_detr.check_segment_validity def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8): # Get the mask associated with the k class mask_k = mask_labels == k mask_k_area = mask_k.sum() # Compute the area of all the stuff in query k original_area = (mask_probs[k] >= mask_threshold).sum() mask_exists = mask_k_area > 0 and original_area > 0 # Eliminate disconnected tiny segments if mask_exists: area_ratio = mask_k_area / original_area if not area_ratio.item() > overlap_mask_area_threshold: mask_exists = False return mask_exists, mask_k # Copied from transformers.models.detr.image_processing_detr.compute_segments def compute_segments( mask_probs, pred_scores, pred_labels, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_size: Tuple[int, int] = None, ): height = mask_probs.shape[1] if target_size is None else target_size[0] width = mask_probs.shape[2] if target_size is None else target_size[1] segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) segments: List[Dict] = [] if target_size is not None: mask_probs = nn.functional.interpolate( mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False )[0] current_segment_id = 0 # Weigh each mask by its prediction score mask_probs *= pred_scores.view(-1, 1, 1) mask_labels = mask_probs.argmax(0) # [height, width] # Keep track of 
instances of each class stuff_memory_list: Dict[str, int] = {} for k in range(pred_labels.shape[0]): pred_class = pred_labels[k].item() should_fuse = pred_class in label_ids_to_fuse # Check if mask exists and large enough to be a segment mask_exists, mask_k = check_segment_validity( mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold ) if mask_exists: if pred_class in stuff_memory_list: current_segment_id = stuff_memory_list[pred_class] else: current_segment_id += 1 # Add current object segment to final segmentation map segmentation[mask_k] = current_segment_id segment_score = round(pred_scores[k].item(), 6) segments.append( { "id": current_segment_id, "label_id": pred_class, "was_fused": should_fuse, "score": segment_score, } ) if should_fuse: stuff_memory_list[pred_class] = current_segment_id return segmentation, segments # TODO: (Amy) Move to image_transforms def convert_segmentation_map_to_binary_masks( segmentation_map: "np.ndarray", instance_id_to_semantic_id: Optional[Dict[int, int]] = None, ignore_index: Optional[int] = None, reduce_labels: bool = False, ): if reduce_labels and ignore_index is None: raise ValueError("If `reduce_labels` is True, `ignore_index` must be provided.") if reduce_labels: segmentation_map = np.where(segmentation_map == 0, ignore_index, segmentation_map - 1) # Get unique ids (class or instance ids based on input) all_labels = np.unique(segmentation_map) # Drop background label if applicable if ignore_index is not None: all_labels = all_labels[all_labels != ignore_index] # Generate a binary mask for each object instance binary_masks = [(segmentation_map == i) for i in all_labels] binary_masks = np.stack(binary_masks, axis=0) # (num_labels, height, width) # Convert instance ids to class ids if instance_id_to_semantic_id is not None: labels = np.zeros(all_labels.shape[0]) for label in all_labels: class_id = instance_id_to_semantic_id[label + 1 if reduce_labels else label] labels[all_labels == label] = class_id - 1 if reduce_labels else class_id else: labels = all_labels return binary_masks.astype(np.float32), labels.astype(np.int64) def get_mask2former_resize_output_image_size( image: np.ndarray, size: Union[int, Tuple[int, int], List[int], Tuple[int]], max_size: Optional[int] = None, size_divisor: int = 0, default_to_square: bool = True, ) -> tuple: """ Computes the output size given the desired size. Args: input_image (`np.ndarray`): The input image. size (`int`, `Tuple[int, int]`, `List[int]`, `Tuple[int]`): The size of the output image. default_to_square (`bool`, *optional*, defaults to `True`): Whether to default to square if no size is provided. max_size (`int`, *optional*): The maximum size of the output image. size_divisible (`int`, *optional*, defaults to `0`): If size_divisible is given, the output image size will be divisible by the number. Returns: `Tuple[int, int]`: The output size. """ output_size = get_resize_output_image_size( input_image=image, size=size, default_to_square=default_to_square, max_size=max_size ) if size_divisor > 0: height, width = output_size height = int(math.ceil(height / size_divisor) * size_divisor) width = int(math.ceil(width / size_divisor) * size_divisor) output_size = (height, width) return output_size class Mask2FormerImageProcessor(BaseImageProcessor): r""" Constructs a Mask2Former image processor. The image processor can be used to prepare image(s) and optional targets for the model. This image processor inherits from [`BaseImageProcessor`] which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the input to a certain `size`. size (`int`, *optional*, defaults to 800): Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of the image will be matched to this number. i.e, if `height > width`, then image will be rescaled to `(size * height / width, size)`. max_size (`int`, *optional*, defaults to 1333): The largest size an image dimension can have (otherwise it's capped). Only has an effect if `do_resize` is set to `True`. resample (`int`, *optional*, defaults to `PIL.Image.Resampling.BILINEAR`): An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`, `PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`, `PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set to `True`. size_divisor (`int`, *optional*, defaults to 32): Some backbones need images divisible by a certain number. If not passed, it defaults to the value used in Swin Transformer. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the input to a certain `scale`. rescale_factor (`float`, *optional*, defaults to 1/ 255): Rescale the input by the given factor. Only has an effect if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `True`): Whether or not to normalize the input with mean and standard deviation. image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`): The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean. image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`): The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the ImageNet std. ignore_index (`int`, *optional*): Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels denoted with 0 (background) will be replaced with `ignore_index`. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to decrement all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by `ignore_index`. """ model_input_names = ["pixel_values", "pixel_mask"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, size_divisor: int = 32, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: float = 1 / 255, do_normalize: bool = True, image_mean: Union[float, List[float]] = None, image_std: Union[float, List[float]] = None, ignore_index: Optional[int] = None, reduce_labels: bool = False, **kwargs, ): if "size_divisibility" in kwargs: warnings.warn( "The `size_divisibility` argument is deprecated and will be removed in v4.27. Please use " "`size_divisor` instead.", FutureWarning, ) size_divisor = kwargs.pop("size_divisibility") if "max_size" in kwargs: warnings.warn( "The `max_size` argument is deprecated and will be removed in v4.27. 
Please use size['longest_edge']" " instead.", FutureWarning, ) # We make max_size a private attribute so we can pass it as a default value in the preprocess method whilst # `size` can still be pass in as an int self._max_size = kwargs.pop("max_size") else: self._max_size = 1333 size = size if size is not None else {"shortest_edge": 800, "longest_edge": self._max_size} size = get_size_dict(size, max_size=self._max_size, default_to_square=False) super().__init__(**kwargs) self.do_resize = do_resize self.size = size self.resample = resample self.size_divisor = size_divisor self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.ignore_index = ignore_index self.reduce_labels = reduce_labels @classmethod def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs): """ Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is created using from_dict and kwargs e.g. `Mask2FormerImageProcessor.from_pretrained(checkpoint, max_size=800)` """ image_processor_dict = image_processor_dict.copy() if "max_size" in kwargs: image_processor_dict["max_size"] = kwargs.pop("max_size") if "size_divisibility" in kwargs: image_processor_dict["size_divisibility"] = kwargs.pop("size_divisibility") return super().from_dict(image_processor_dict, **kwargs) @property def size_divisibility(self): warnings.warn( "The `size_divisibility` property is deprecated and will be removed in v4.27. Please use " "`size_divisor` instead.", FutureWarning, ) return self.size_divisor @property def max_size(self): warnings.warn( "The `max_size` property is deprecated and will be removed in v4.27. Please use size['longest_edge']" " instead.", FutureWarning, ) return self.size["longest_edge"] def resize( self, image: np.ndarray, size: Dict[str, int], size_divisor: int = 0, resample: PILImageResampling = PILImageResampling.BILINEAR, data_format=None, **kwargs, ) -> np.ndarray: """ Resize the image to the given size. Size can be min_size (scalar) or `(height, width)` tuple. If size is an int, smaller edge of the image will be matched to this number. """ if "max_size" in kwargs: warnings.warn( "The `max_size` parameter is deprecated and will be removed in v4.27. " "Please specify in `size['longest_edge'] instead`.", FutureWarning, ) max_size = kwargs.pop("max_size") else: max_size = None size = get_size_dict(size, max_size=max_size, default_to_square=False) if "shortest_edge" in size and "longest_edge" in size: size, max_size = size["shortest_edge"], size["longest_edge"] elif "height" in size and "width" in size: size = (size["height"], size["width"]) max_size = None else: raise ValueError( "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got" f" {size.keys()}." ) size = get_mask2former_resize_output_image_size( image=image, size=size, max_size=max_size, size_divisor=size_divisor, default_to_square=False, ) image = resize(image, size=size, resample=resample, data_format=data_format) return image def rescale( self, image: np.ndarray, rescale_factor: float, data_format: Optional[ChannelDimension] = None ) -> np.ndarray: """ Rescale the image by the given factor. 
""" return rescale(image, rescale_factor, data_format=data_format) def normalize( self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Normalize the image with the given mean and standard deviation. """ return normalize(image, mean=mean, std=std, data_format=data_format) def convert_segmentation_map_to_binary_masks( self, segmentation_map: "np.ndarray", instance_id_to_semantic_id: Optional[Dict[int, int]] = None, ignore_index: Optional[int] = None, reduce_labels: bool = False, ): reduce_labels = reduce_labels if reduce_labels is not None else self.reduce_labels ignore_index = ignore_index if ignore_index is not None else self.ignore_index return convert_segmentation_map_to_binary_masks( segmentation_map=segmentation_map, instance_id_to_semantic_id=instance_id_to_semantic_id, ignore_index=ignore_index, reduce_labels=reduce_labels, ) def __call__(self, images, segmentation_maps=None, **kwargs) -> BatchFeature: return self.preprocess(images, segmentation_maps=segmentation_maps, **kwargs) def _preprocess( self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, size_divisor: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, ): if do_resize: image = self.resize(image, size=size, size_divisor=size_divisor, resample=resample) if do_rescale: image = self.rescale(image, rescale_factor=rescale_factor) if do_normalize: image = self.normalize(image, mean=image_mean, std=image_std) return image def _preprocess_image( self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, size_divisor: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """Preprocesses a single image.""" # All transformations expect numpy arrays. image = to_numpy_array(image) image = self._preprocess( image=image, do_resize=do_resize, size=size, size_divisor=size_divisor, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, ) if data_format is not None: image = to_channel_dimension_format(image, data_format) return image def _preprocess_mask( self, segmentation_map: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, size_divisor: int = 0, ) -> np.ndarray: """Preprocesses a single mask.""" segmentation_map = to_numpy_array(segmentation_map) # Add channel dimension if missing - needed for certain transformations added_channel_dim = False if segmentation_map.ndim == 2: added_channel_dim = True segmentation_map = segmentation_map[None, ...] # TODO: (Amy) # Remork segmentation map processing to include reducing labels and resizing which doesn't # drop segment IDs > 255. 
segmentation_map = self._preprocess( image=segmentation_map, do_resize=do_resize, resample=PILImageResampling.NEAREST, size=size, size_divisor=size_divisor, do_rescale=False, do_normalize=False, ) # Remove extra channel dimension if added for processing if added_channel_dim: segmentation_map = segmentation_map.squeeze(0) return segmentation_map def preprocess( self, images: ImageInput, segmentation_maps: Optional[ImageInput] = None, instance_id_to_semantic_id: Optional[Dict[int, int]] = None, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, size_divisor: Optional[int] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, ignore_index: Optional[int] = None, reduce_labels: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs, ) -> BatchFeature: if "pad_and_return_pixel_mask" in kwargs: warnings.warn( "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version", FutureWarning, ) do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False, max_size=self._max_size) size_divisor = size_divisor if size_divisor is not None else self.size_divisor resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std ignore_index = ignore_index if ignore_index is not None else self.ignore_index reduce_labels = reduce_labels if reduce_labels is not None else self.reduce_labels if do_resize is not None and size is None or size_divisor is None: raise ValueError("If `do_resize` is True, `size` and `size_divisor` must be provided.") if do_rescale is not None and rescale_factor is None: raise ValueError("If `do_rescale` is True, `rescale_factor` must be provided.") if do_normalize is not None and (image_mean is None or image_std is None): raise ValueError("If `do_normalize` is True, `image_mean` and `image_std` must be provided.") if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if segmentation_maps is not None and not valid_images(segmentation_maps): raise ValueError( "Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." 
) if not is_batched(images): images = [images] segmentation_maps = [segmentation_maps] if segmentation_maps is not None else None if segmentation_maps is not None and len(images) != len(segmentation_maps): raise ValueError("Images and segmentation maps must have the same length.") images = [ self._preprocess_image( image, do_resize=do_resize, size=size, size_divisor=size_divisor, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, ) for image in images ] if segmentation_maps is not None: segmentation_maps = [ self._preprocess_mask(segmentation_map, do_resize, size, size_divisor) for segmentation_map in segmentation_maps ] encoded_inputs = self.encode_inputs( images, segmentation_maps, instance_id_to_semantic_id, ignore_index, reduce_labels, return_tensors ) return encoded_inputs # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image def _pad_image( self, image: np.ndarray, output_size: Tuple[int, int], constant_values: Union[float, Iterable[float]] = 0, data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Pad an image with zeros to the given size. """ input_height, input_width = get_image_size(image) output_height, output_width = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad( image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format ) return padded_image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad def pad( self, images: List[np.ndarray], constant_values: Union[float, Iterable[float]] = 0, return_pixel_mask: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width in the batch and optionally returns their corresponding pixel mask. Args: image (`np.ndarray`): Image to pad. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether to return a pixel mask. input_channel_dimension (`ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be inferred from the input image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ pad_size = get_max_height_width(images) padded_images = [ self._pad_image(image, pad_size, constant_values=constant_values, data_format=data_format) for image in images ] data = {"pixel_values": padded_images} if return_pixel_mask: masks = [make_pixel_mask(image=image, output_size=pad_size) for image in images] data["pixel_mask"] = masks return BatchFeature(data=data, tensor_type=return_tensors) def encode_inputs( self, pixel_values_list: List[ImageInput], segmentation_maps: ImageInput = None, instance_id_to_semantic_id: Optional[Union[List[Dict[int, int]], Dict[int, int]]] = None, ignore_index: Optional[int] = None, reduce_labels: bool = False, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ): """ Pad images up to the largest image in a batch and create a corresponding `pixel_mask`. 
Mask2Former addresses semantic segmentation with a mask classification paradigm, thus input segmentation maps will be converted to lists of binary masks and their respective labels. Let's see an example, assuming `segmentation_maps = [[2,6,7,9]]`, the output will contain `mask_labels = [[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]` (four binary masks) and `class_labels = [2,6,7,9]`, the labels for each mask. Args: pixel_values_list (`List[ImageInput]`): List of images (pixel values) to be padded. Each image should be a tensor of shape `(channels, height, width)`. segmentation_maps (`ImageInput`, *optional*): The corresponding semantic segmentation maps with the pixel-wise annotations. (`bool`, *optional*, defaults to `True`): Whether or not to pad images up to the largest image in a batch and create a pixel mask. If left to the default, will return a pixel mask that is: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). instance_id_to_semantic_id (`List[Dict[int, int]]` or `Dict[int, int]`, *optional*): A mapping between object instance ids and class ids. If passed, `segmentation_maps` is treated as an instance segmentation map where each pixel represents an instance id. Can be provided as a single dictionary with a global/dataset-level mapping or as a list of dictionaries (one per image), to map instance ids in each image separately. return_tensors (`str` or [`~file_utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **pixel_values** -- Pixel values to be fed to a model. - **pixel_mask** -- Pixel mask to be fed to a model (when `=True` or if `pixel_mask` is in `self.model_input_names`). - **mask_labels** -- Optional list of mask labels of shape `(labels, height, width)` to be fed to a model (when `annotations` are provided). - **class_labels** -- Optional list of class labels of shape `(labels)` to be fed to a model (when `annotations` are provided). They identify the labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` if `class_labels[i][j]`. """ ignore_index = self.ignore_index if ignore_index is None else ignore_index reduce_labels = self.reduce_labels if reduce_labels is None else reduce_labels if "pad_and_return_pixel_mask" in kwargs: warnings.warn( "The `pad_and_return_pixel_mask` argument has no effect and will be removed in v4.27", FutureWarning ) pixel_values_list = [to_numpy_array(pixel_values) for pixel_values in pixel_values_list] encoded_inputs = self.pad(pixel_values_list, return_tensors=return_tensors) if segmentation_maps is not None: mask_labels = [] class_labels = [] pad_size = get_max_height_width(pixel_values_list) # Convert to list of binary masks and labels for idx, segmentation_map in enumerate(segmentation_maps): segmentation_map = to_numpy_array(segmentation_map) if isinstance(instance_id_to_semantic_id, list): instance_id = instance_id_to_semantic_id[idx] else: instance_id = instance_id_to_semantic_id # Use instance2class_id mapping per image masks, classes = self.convert_segmentation_map_to_binary_masks( segmentation_map, instance_id, ignore_index=ignore_index, reduce_labels=reduce_labels ) # We add an axis to make them compatible with the transformations library # this will be removed in the future masks = [mask[None, ...] 
for mask in masks] masks = [ self._pad_image(image=mask, output_size=pad_size, constant_values=ignore_index) for mask in masks ] masks = np.concatenate(masks, axis=0) mask_labels.append(torch.from_numpy(masks)) class_labels.append(torch.from_numpy(classes)) # we cannot batch them since they don't share a common class size encoded_inputs["mask_labels"] = mask_labels encoded_inputs["class_labels"] = class_labels return encoded_inputs def post_process_semantic_segmentation( self, outputs, target_sizes: Optional[List[Tuple[int, int]]] = None ) -> "torch.Tensor": """ Converts the output of [`Mask2FormerForUniversalSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`Mask2FormerForUniversalSegmentation`]): Raw outputs of the model. target_sizes (`List[Tuple[int, int]]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If left to None, predictions will not be resized. Returns: `List[torch.Tensor]`: A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id. """ class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] # Scale back to preprocessed image size - (384, 384) for all models masks_queries_logits = torch.nn.functional.interpolate( masks_queries_logits, size=(384, 384), mode="bilinear", align_corners=False ) # Remove the null class `[..., :-1]` masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Semantic segmentation logits of shape (batch_size, num_classes, height, width) segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) batch_size = class_queries_logits.shape[0] # Resize logits and compute semantic segmentation maps if target_sizes is not None: if batch_size != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) semantic_segmentation = [] for idx in range(batch_size): resized_logits = torch.nn.functional.interpolate( segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False ) semantic_map = resized_logits[0].argmax(dim=0) semantic_segmentation.append(semantic_map) else: semantic_segmentation = segmentation.argmax(dim=1) semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation def post_process_instance_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, target_sizes: Optional[List[Tuple[int, int]]] = None, return_coco_annotation: Optional[bool] = False, return_binary_maps: Optional[bool] = False, ) -> List[Dict]: """ Converts the output of [`Mask2FormerForUniversalSegmentationOutput`] into instance segmentation predictions. Only supports PyTorch. Args: outputs ([`Mask2FormerForUniversalSegmentation`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. 
mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If left to None, predictions will not be resized. return_coco_annotation (`bool`, *optional*, defaults to `False`): If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format. return_binary_maps (`bool`, *optional*, defaults to `False`): If set to `True`, segmentation maps are returned as a concatenated tensor of binary segmentation maps (one per detected instance). Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id` or `List[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to `True`. Set to `None` if no mask if found above `threshold`. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- An integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ if return_coco_annotation and return_binary_maps: raise ValueError("return_coco_annotation and return_binary_maps can not be both set to True.") # [batch_size, num_queries, num_classes+1] class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, height, width] masks_queries_logits = outputs.masks_queries_logits # Scale back to preprocessed image size - (384, 384) for all models masks_queries_logits = torch.nn.functional.interpolate( masks_queries_logits, size=(384, 384), mode="bilinear", align_corners=False ) device = masks_queries_logits.device num_classes = class_queries_logits.shape[-1] - 1 num_queries = class_queries_logits.shape[-2] # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(class_queries_logits.shape[0]): mask_pred = masks_queries_logits[i] mask_cls = class_queries_logits[i] scores = torch.nn.functional.softmax(mask_cls, dim=-1)[:, :-1] labels = torch.arange(num_classes, device=device).unsqueeze(0).repeat(num_queries, 1).flatten(0, 1) scores_per_image, topk_indices = scores.flatten(0, 1).topk(num_queries, sorted=False) labels_per_image = labels[topk_indices] topk_indices = topk_indices // num_classes mask_pred = mask_pred[topk_indices] pred_masks = (mask_pred > 0).float() # Calculate average mask prob mask_scores_per_image = (mask_pred.sigmoid().flatten(1) * pred_masks.flatten(1)).sum(1) / ( pred_masks.flatten(1).sum(1) + 1e-6 ) pred_scores = scores_per_image * mask_scores_per_image pred_classes = labels_per_image segmentation = torch.zeros((384, 384)) - 1 if target_sizes is not None: segmentation = torch.zeros(target_sizes[i]) - 1 pred_masks = torch.nn.functional.interpolate( pred_masks.unsqueeze(0), size=target_sizes[i], mode="nearest" )[0] instance_maps, segments = [], [] current_segment_id = 0 for j in range(num_queries): score = pred_scores[j].item() if not torch.all(pred_masks[j] == 0) and score >= threshold: segmentation[pred_masks[j] == 1] = current_segment_id 
segments.append( { "id": current_segment_id, "label_id": pred_classes[j].item(), "was_fused": False, "score": round(score, 6), } ) current_segment_id += 1 instance_maps.append(pred_masks[j]) # Return segmentation map in run-length encoding (RLE) format if return_coco_annotation: segmentation = convert_segmentation_to_rle(segmentation) # Return a concatenated tensor of binary instance maps if return_binary_maps and len(instance_maps) != 0: segmentation = torch.stack(instance_maps, dim=0) results.append({"segmentation": segmentation, "segments_info": segments}) return results def post_process_panoptic_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_sizes: Optional[List[Tuple[int, int]]] = None, ) -> List[Dict]: """ Converts the output of [`Mask2FormerForUniversalSegmentationOutput`] into image panoptic segmentation predictions. Only supports PyTorch. Args: outputs ([`Mask2FormerForUniversalSegmentationOutput`]): The outputs from [`Mask2FormerForUniversalSegmentation`]. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. label_ids_to_fuse (`Set[int]`, *optional*): The labels in this state will have all their instances be fused together. For instance we could say there can only be one sky in an image, but several persons, so the label ID for sky would be in that set, but not the one for person. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction in batch. If left to None, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set to `None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized to the corresponding `target_sizes` entry. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- an integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. Multiple instances of the same class / label were fused and assigned a single `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ if label_ids_to_fuse is None: logger.warning("`label_ids_to_fuse` unset. 
No instance will be fused.") label_ids_to_fuse = set() class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] # Scale back to preprocessed image size - (384, 384) for all models masks_queries_logits = torch.nn.functional.interpolate( masks_queries_logits, size=(384, 384), mode="bilinear", align_corners=False ) batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) # No mask found if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=label_ids_to_fuse, target_size=target_size, ) results.append({"segmentation": segmentation, "segments_info": segments}) return results
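# Illustrative usage sketch (not part of the original file): running semantic post-processing end to end.
# The checkpoint id and image path below are placeholders, not references taken from this module.
from PIL import Image
from transformers import Mask2FormerForUniversalSegmentation

image = Image.open("example.jpg").convert("RGB")  # placeholder image path
processor = Mask2FormerImageProcessor()
model = Mask2FormerForUniversalSegmentation.from_pretrained("<mask2former-checkpoint>")  # placeholder id

inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Map each pixel back to a class id at the original resolution; PIL `size` is (width, height), hence the reversal
semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]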
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Mask2Former.""" import math import warnings from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import numpy as np from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from transformers.image_transforms import ( PaddingMode, get_resize_output_image_size, normalize, pad, rescale, resize, to_channel_dimension_format, to_numpy_array, ) from transformers.image_utils import ( ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, is_batched, valid_images, ) from transformers.utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, TensorType, is_torch_available, is_torch_tensor, logging, ) logger = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from ...pytorch_utils import torch_int_div # Copied from transformers.models.detr.image_processing_detr.max_across_indices def max_across_indices(values: Iterable[Any]) -> List[Any]: """ Return the maximum value across all indices of an iterable of values. """ return [max(values_i) for values_i in zip(*values)] # Copied from transformers.models.detr.image_processing_detr.get_max_height_width def get_max_height_width(images: List[np.ndarray]) -> List[int]: """ Get the maximum height and width across all images in a batch. """ input_channel_dimension = infer_channel_dimension_format(images[0]) if input_channel_dimension == ChannelDimension.FIRST: _, max_height, max_width = max_across_indices([img.shape for img in images]) elif input_channel_dimension == ChannelDimension.LAST: max_height, max_width, _ = max_across_indices([img.shape for img in images]) else: raise ValueError(f"Invalid channel dimension format: {input_channel_dimension}") return (max_height, max_width) # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int]) -> np.ndarray: """ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. Args: image (`np.ndarray`): Image to make the pixel mask for. output_size (`Tuple[int, int]`): Output size of the mask. """ input_height, input_width = get_image_size(image) mask = np.zeros(output_size, dtype=np.int64) mask[:input_height, :input_width] = 1 return mask # Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle def binary_mask_to_rle(mask): """ Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format. Args: mask (`torch.Tensor` or `numpy.array`): A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target segment_id or class_id. Returns: `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE format. 
""" if is_torch_tensor(mask): mask = mask.numpy() pixels = mask.flatten() pixels = np.concatenate([[0], pixels, [0]]) runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 runs[1::2] -= runs[::2] return list(runs) # Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle def convert_segmentation_to_rle(segmentation): """ Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format. Args: segmentation (`torch.Tensor` or `numpy.array`): A segmentation map of shape `(height, width)` where each value denotes a segment or class id. Returns: `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id. """ segment_ids = torch.unique(segmentation) run_length_encodings = [] for idx in segment_ids: mask = torch.where(segmentation == idx, 1, 0) rle = binary_mask_to_rle(mask) run_length_encodings.append(rle) return run_length_encodings # Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): """ Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and `labels`. Args: masks (`torch.Tensor`): A tensor of shape `(num_queries, height, width)`. scores (`torch.Tensor`): A tensor of shape `(num_queries)`. labels (`torch.Tensor`): A tensor of shape `(num_queries)`. object_mask_threshold (`float`): A number between 0 and 1 used to binarize the masks. Raises: `ValueError`: Raised when the first dimension doesn't match in all input tensors. Returns: `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region < `object_mask_threshold`. """ if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): raise ValueError("mask, scores and labels must have the same shape!") to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) return masks[to_keep], scores[to_keep], labels[to_keep] # Copied from transformers.models.detr.image_processing_detr.check_segment_validity def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8): # Get the mask associated with the k class mask_k = mask_labels == k mask_k_area = mask_k.sum() # Compute the area of all the stuff in query k original_area = (mask_probs[k] >= mask_threshold).sum() mask_exists = mask_k_area > 0 and original_area > 0 # Eliminate disconnected tiny segments if mask_exists: area_ratio = mask_k_area / original_area if not area_ratio.item() > overlap_mask_area_threshold: mask_exists = False return mask_exists, mask_k # Copied from transformers.models.detr.image_processing_detr.compute_segments def compute_segments( mask_probs, pred_scores, pred_labels, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_size: Tuple[int, int] = None, ): height = mask_probs.shape[1] if target_size is None else target_size[0] width = mask_probs.shape[2] if target_size is None else target_size[1] segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) segments: List[Dict] = [] if target_size is not None: mask_probs = nn.functional.interpolate( mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False )[0] current_segment_id = 0 # Weigh each mask by its prediction score mask_probs *= pred_scores.view(-1, 1, 1) mask_labels = mask_probs.argmax(0) # [height, width] # Keep track of 
instances of each class stuff_memory_list: Dict[str, int] = {} for k in range(pred_labels.shape[0]): pred_class = pred_labels[k].item() should_fuse = pred_class in label_ids_to_fuse # Check if mask exists and large enough to be a segment mask_exists, mask_k = check_segment_validity( mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold ) if mask_exists: if pred_class in stuff_memory_list: current_segment_id = stuff_memory_list[pred_class] else: current_segment_id += 1 # Add current object segment to final segmentation map segmentation[mask_k] = current_segment_id segment_score = round(pred_scores[k].item(), 6) segments.append( { "id": current_segment_id, "label_id": pred_class, "was_fused": should_fuse, "score": segment_score, } ) if should_fuse: stuff_memory_list[pred_class] = current_segment_id return segmentation, segments # TODO: (Amy) Move to image_transforms def convert_segmentation_map_to_binary_masks( segmentation_map: "np.ndarray", instance_id_to_semantic_id: Optional[Dict[int, int]] = None, ignore_index: Optional[int] = None, reduce_labels: bool = False, ): if reduce_labels and ignore_index is None: raise ValueError("If `reduce_labels` is True, `ignore_index` must be provided.") if reduce_labels: segmentation_map = np.where(segmentation_map == 0, ignore_index, segmentation_map - 1) # Get unique ids (class or instance ids based on input) all_labels = np.unique(segmentation_map) # Drop background label if applicable if ignore_index is not None: all_labels = all_labels[all_labels != ignore_index] # Generate a binary mask for each object instance binary_masks = [(segmentation_map == i) for i in all_labels] binary_masks = np.stack(binary_masks, axis=0) # (num_labels, height, width) # Convert instance ids to class ids if instance_id_to_semantic_id is not None: labels = np.zeros(all_labels.shape[0]) for label in all_labels: class_id = instance_id_to_semantic_id[label + 1 if reduce_labels else label] labels[all_labels == label] = class_id - 1 if reduce_labels else class_id else: labels = all_labels return binary_masks.astype(np.float32), labels.astype(np.int64) def get_mask2former_resize_output_image_size( image: np.ndarray, size: Union[int, Tuple[int, int], List[int], Tuple[int]], max_size: Optional[int] = None, size_divisor: int = 0, default_to_square: bool = True, ) -> tuple: """ Computes the output size given the desired size. Args: input_image (`np.ndarray`): The input image. size (`int`, `Tuple[int, int]`, `List[int]`, `Tuple[int]`): The size of the output image. default_to_square (`bool`, *optional*, defaults to `True`): Whether to default to square if no size is provided. max_size (`int`, *optional*): The maximum size of the output image. size_divisible (`int`, *optional*, defaults to `0`): If size_divisible is given, the output image size will be divisible by the number. Returns: `Tuple[int, int]`: The output size. """ output_size = get_resize_output_image_size( input_image=image, size=size, default_to_square=default_to_square, max_size=max_size ) if size_divisor > 0: height, width = output_size height = int(math.ceil(height / size_divisor) * size_divisor) width = int(math.ceil(width / size_divisor) * size_divisor) output_size = (height, width) return output_size class Mask2FormerImageProcessor(BaseImageProcessor): r""" Constructs a Mask2Former image processor. The image processor can be used to prepare image(s) and optional targets for the model. This image processor inherits from [`BaseImageProcessor`] which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the input to a certain `size`. size (`int`, *optional*, defaults to 800): Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of the image will be matched to this number. i.e, if `height > width`, then image will be rescaled to `(size * height / width, size)`. max_size (`int`, *optional*, defaults to 1333): The largest size an image dimension can have (otherwise it's capped). Only has an effect if `do_resize` is set to `True`. resample (`int`, *optional*, defaults to `PIL.Image.Resampling.BILINEAR`): An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`, `PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`, `PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set to `True`. size_divisor (`int`, *optional*, defaults to 32): Some backbones need images divisible by a certain number. If not passed, it defaults to the value used in Swin Transformer. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the input to a certain `scale`. rescale_factor (`float`, *optional*, defaults to 1/ 255): Rescale the input by the given factor. Only has an effect if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `True`): Whether or not to normalize the input with mean and standard deviation. image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`): The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean. image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`): The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the ImageNet std. ignore_index (`int`, *optional*): Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels denoted with 0 (background) will be replaced with `ignore_index`. reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to decrement all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by `ignore_index`. """ model_input_names = ["pixel_values", "pixel_mask"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, size_divisor: int = 32, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: float = 1 / 255, do_normalize: bool = True, image_mean: Union[float, List[float]] = None, image_std: Union[float, List[float]] = None, ignore_index: Optional[int] = None, reduce_labels: bool = False, **kwargs, ): if "size_divisibility" in kwargs: warnings.warn( "The `size_divisibility` argument is deprecated and will be removed in v4.27. Please use " "`size_divisor` instead.", FutureWarning, ) size_divisor = kwargs.pop("size_divisibility") if "max_size" in kwargs: warnings.warn( "The `max_size` argument is deprecated and will be removed in v4.27. 
Please use size['longest_edge']" " instead.", FutureWarning, ) # We make max_size a private attribute so we can pass it as a default value in the preprocess method whilst # `size` can still be pass in as an int self._max_size = kwargs.pop("max_size") else: self._max_size = 1333 size = size if size is not None else {"shortest_edge": 800, "longest_edge": self._max_size} size = get_size_dict(size, max_size=self._max_size, default_to_square=False) super().__init__(**kwargs) self.do_resize = do_resize self.size = size self.resample = resample self.size_divisor = size_divisor self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.ignore_index = ignore_index self.reduce_labels = reduce_labels @classmethod def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs): """ Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is created using from_dict and kwargs e.g. `Mask2FormerImageProcessor.from_pretrained(checkpoint, max_size=800)` """ image_processor_dict = image_processor_dict.copy() if "max_size" in kwargs: image_processor_dict["max_size"] = kwargs.pop("max_size") if "size_divisibility" in kwargs: image_processor_dict["size_divisibility"] = kwargs.pop("size_divisibility") return super().from_dict(image_processor_dict, **kwargs) @property def size_divisibility(self): warnings.warn( "The `size_divisibility` property is deprecated and will be removed in v4.27. Please use " "`size_divisor` instead.", FutureWarning, ) return self.size_divisor @property def max_size(self): warnings.warn( "The `max_size` property is deprecated and will be removed in v4.27. Please use size['longest_edge']" " instead.", FutureWarning, ) return self.size["longest_edge"] def resize( self, image: np.ndarray, size: Dict[str, int], size_divisor: int = 0, resample: PILImageResampling = PILImageResampling.BILINEAR, data_format=None, **kwargs, ) -> np.ndarray: """ Resize the image to the given size. Size can be min_size (scalar) or `(height, width)` tuple. If size is an int, smaller edge of the image will be matched to this number. """ if "max_size" in kwargs: warnings.warn( "The `max_size` parameter is deprecated and will be removed in v4.27. " "Please specify in `size['longest_edge'] instead`.", FutureWarning, ) max_size = kwargs.pop("max_size") else: max_size = None size = get_size_dict(size, max_size=max_size, default_to_square=False) if "shortest_edge" in size and "longest_edge" in size: size, max_size = size["shortest_edge"], size["longest_edge"] elif "height" in size and "width" in size: size = (size["height"], size["width"]) max_size = None else: raise ValueError( "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got" f" {size.keys()}." ) size = get_mask2former_resize_output_image_size( image=image, size=size, max_size=max_size, size_divisor=size_divisor, default_to_square=False, ) image = resize(image, size=size, resample=resample, data_format=data_format) return image def rescale( self, image: np.ndarray, rescale_factor: float, data_format: Optional[ChannelDimension] = None ) -> np.ndarray: """ Rescale the image by the given factor. 
""" return rescale(image, rescale_factor, data_format=data_format) def normalize( self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Normalize the image with the given mean and standard deviation. """ return normalize(image, mean=mean, std=std, data_format=data_format) def convert_segmentation_map_to_binary_masks( self, segmentation_map: "np.ndarray", instance_id_to_semantic_id: Optional[Dict[int, int]] = None, ignore_index: Optional[int] = None, reduce_labels: bool = False, ): reduce_labels = reduce_labels if reduce_labels is not None else self.reduce_labels ignore_index = ignore_index if ignore_index is not None else self.ignore_index return convert_segmentation_map_to_binary_masks( segmentation_map=segmentation_map, instance_id_to_semantic_id=instance_id_to_semantic_id, ignore_index=ignore_index, reduce_labels=reduce_labels, ) def __call__(self, images, segmentation_maps=None, **kwargs) -> BatchFeature: return self.preprocess(images, segmentation_maps=segmentation_maps, **kwargs) def _preprocess( self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, size_divisor: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, ): if do_resize: image = self.resize(image, size=size, size_divisor=size_divisor, resample=resample) if do_rescale: image = self.rescale(image, rescale_factor=rescale_factor) if do_normalize: image = self.normalize(image, mean=image_mean, std=image_std) return image def _preprocess_image( self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, size_divisor: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """Preprocesses a single image.""" # All transformations expect numpy arrays. image = to_numpy_array(image) image = self._preprocess( image=image, do_resize=do_resize, size=size, size_divisor=size_divisor, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, ) if data_format is not None: image = to_channel_dimension_format(image, data_format) return image def _preprocess_mask( self, segmentation_map: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, size_divisor: int = 0, ) -> np.ndarray: """Preprocesses a single mask.""" segmentation_map = to_numpy_array(segmentation_map) # Add channel dimension if missing - needed for certain transformations added_channel_dim = False if segmentation_map.ndim == 2: added_channel_dim = True segmentation_map = segmentation_map[None, ...] # TODO: (Amy) # Remork segmentation map processing to include reducing labels and resizing which doesn't # drop segment IDs > 255. 
segmentation_map = self._preprocess( image=segmentation_map, do_resize=do_resize, resample=PILImageResampling.NEAREST, size=size, size_divisor=size_divisor, do_rescale=False, do_normalize=False, ) # Remove extra channel dimension if added for processing if added_channel_dim: segmentation_map = segmentation_map.squeeze(0) return segmentation_map def preprocess( self, images: ImageInput, segmentation_maps: Optional[ImageInput] = None, instance_id_to_semantic_id: Optional[Dict[int, int]] = None, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, size_divisor: Optional[int] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, ignore_index: Optional[int] = None, reduce_labels: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs, ) -> BatchFeature: if "pad_and_return_pixel_mask" in kwargs: warnings.warn( "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in a future version", FutureWarning, ) do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False, max_size=self._max_size) size_divisor = size_divisor if size_divisor is not None else self.size_divisor resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std ignore_index = ignore_index if ignore_index is not None else self.ignore_index reduce_labels = reduce_labels if reduce_labels is not None else self.reduce_labels if do_resize is not None and size is None or size_divisor is None: raise ValueError("If `do_resize` is True, `size` and `size_divisor` must be provided.") if do_rescale is not None and rescale_factor is None: raise ValueError("If `do_rescale` is True, `rescale_factor` must be provided.") if do_normalize is not None and (image_mean is None or image_std is None): raise ValueError("If `do_normalize` is True, `image_mean` and `image_std` must be provided.") if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if segmentation_maps is not None and not valid_images(segmentation_maps): raise ValueError( "Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." 
) if not is_batched(images): images = [images] segmentation_maps = [segmentation_maps] if segmentation_maps is not None else None if segmentation_maps is not None and len(images) != len(segmentation_maps): raise ValueError("Images and segmentation maps must have the same length.") images = [ self._preprocess_image( image, do_resize=do_resize, size=size, size_divisor=size_divisor, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, ) for image in images ] if segmentation_maps is not None: segmentation_maps = [ self._preprocess_mask(segmentation_map, do_resize, size, size_divisor) for segmentation_map in segmentation_maps ] encoded_inputs = self.encode_inputs( images, segmentation_maps, instance_id_to_semantic_id, ignore_index, reduce_labels, return_tensors ) return encoded_inputs # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image def _pad_image( self, image: np.ndarray, output_size: Tuple[int, int], constant_values: Union[float, Iterable[float]] = 0, data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Pad an image with zeros to the given size. """ input_height, input_width = get_image_size(image) output_height, output_width = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad( image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format ) return padded_image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad def pad( self, images: List[np.ndarray], constant_values: Union[float, Iterable[float]] = 0, return_pixel_mask: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width in the batch and optionally returns their corresponding pixel mask. Args: image (`np.ndarray`): Image to pad. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether to return a pixel mask. input_channel_dimension (`ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be inferred from the input image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ pad_size = get_max_height_width(images) padded_images = [ self._pad_image(image, pad_size, constant_values=constant_values, data_format=data_format) for image in images ] data = {"pixel_values": padded_images} if return_pixel_mask: masks = [make_pixel_mask(image=image, output_size=pad_size) for image in images] data["pixel_mask"] = masks return BatchFeature(data=data, tensor_type=return_tensors) def encode_inputs( self, pixel_values_list: List[ImageInput], segmentation_maps: ImageInput = None, instance_id_to_semantic_id: Optional[Union[List[Dict[int, int]], Dict[int, int]]] = None, ignore_index: Optional[int] = None, reduce_labels: bool = False, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ): """ Pad images up to the largest image in a batch and create a corresponding `pixel_mask`. 
Mask2Former addresses semantic segmentation with a mask classification paradigm, thus input segmentation maps will be converted to lists of binary masks and their respective labels. Let's see an example, assuming `segmentation_maps = [[2,6,7,9]]`, the output will contain `mask_labels = [[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]` (four binary masks) and `class_labels = [2,6,7,9]`, the labels for each mask. Args: pixel_values_list (`List[ImageInput]`): List of images (pixel values) to be padded. Each image should be a tensor of shape `(channels, height, width)`. segmentation_maps (`ImageInput`, *optional*): The corresponding semantic segmentation maps with the pixel-wise annotations. (`bool`, *optional*, defaults to `True`): Whether or not to pad images up to the largest image in a batch and create a pixel mask. If left to the default, will return a pixel mask that is: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). instance_id_to_semantic_id (`List[Dict[int, int]]` or `Dict[int, int]`, *optional*): A mapping between object instance ids and class ids. If passed, `segmentation_maps` is treated as an instance segmentation map where each pixel represents an instance id. Can be provided as a single dictionary with a global/dataset-level mapping or as a list of dictionaries (one per image), to map instance ids in each image separately. return_tensors (`str` or [`~file_utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **pixel_values** -- Pixel values to be fed to a model. - **pixel_mask** -- Pixel mask to be fed to a model (when `=True` or if `pixel_mask` is in `self.model_input_names`). - **mask_labels** -- Optional list of mask labels of shape `(labels, height, width)` to be fed to a model (when `annotations` are provided). - **class_labels** -- Optional list of class labels of shape `(labels)` to be fed to a model (when `annotations` are provided). They identify the labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` if `class_labels[i][j]`. """ ignore_index = self.ignore_index if ignore_index is None else ignore_index reduce_labels = self.reduce_labels if reduce_labels is None else reduce_labels if "pad_and_return_pixel_mask" in kwargs: warnings.warn( "The `pad_and_return_pixel_mask` argument has no effect and will be removed in v4.27", FutureWarning ) pixel_values_list = [to_numpy_array(pixel_values) for pixel_values in pixel_values_list] encoded_inputs = self.pad(pixel_values_list, return_tensors=return_tensors) if segmentation_maps is not None: mask_labels = [] class_labels = [] pad_size = get_max_height_width(pixel_values_list) # Convert to list of binary masks and labels for idx, segmentation_map in enumerate(segmentation_maps): segmentation_map = to_numpy_array(segmentation_map) if isinstance(instance_id_to_semantic_id, list): instance_id = instance_id_to_semantic_id[idx] else: instance_id = instance_id_to_semantic_id # Use instance2class_id mapping per image masks, classes = self.convert_segmentation_map_to_binary_masks( segmentation_map, instance_id, ignore_index=ignore_index, reduce_labels=reduce_labels ) # We add an axis to make them compatible with the transformations library # this will be removed in the future masks = [mask[None, ...] 
for mask in masks] masks = [ self._pad_image(image=mask, output_size=pad_size, constant_values=ignore_index) for mask in masks ] masks = np.concatenate(masks, axis=0) mask_labels.append(torch.from_numpy(masks)) class_labels.append(torch.from_numpy(classes)) # we cannot batch them since they don't share a common class size encoded_inputs["mask_labels"] = mask_labels encoded_inputs["class_labels"] = class_labels return encoded_inputs def post_process_semantic_segmentation( self, outputs, target_sizes: Optional[List[Tuple[int, int]]] = None ) -> "torch.Tensor": """ Converts the output of [`Mask2FormerForUniversalSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`Mask2FormerForUniversalSegmentation`]): Raw outputs of the model. target_sizes (`List[Tuple[int, int]]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If left to None, predictions will not be resized. Returns: `List[torch.Tensor]`: A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id. """ class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] # Scale back to preprocessed image size - (384, 384) for all models masks_queries_logits = torch.nn.functional.interpolate( masks_queries_logits, size=(384, 384), mode="bilinear", align_corners=False ) # Remove the null class `[..., :-1]` masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Semantic segmentation logits of shape (batch_size, num_classes, height, width) segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) batch_size = class_queries_logits.shape[0] # Resize logits and compute semantic segmentation maps if target_sizes is not None: if batch_size != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) semantic_segmentation = [] for idx in range(batch_size): resized_logits = torch.nn.functional.interpolate( segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False ) semantic_map = resized_logits[0].argmax(dim=0) semantic_segmentation.append(semantic_map) else: semantic_segmentation = segmentation.argmax(dim=1) semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation def post_process_instance_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, target_sizes: Optional[List[Tuple[int, int]]] = None, return_coco_annotation: Optional[bool] = False, return_binary_maps: Optional[bool] = False, ) -> List[Dict]: """ Converts the output of [`Mask2FormerForUniversalSegmentationOutput`] into instance segmentation predictions. Only supports PyTorch. Args: outputs ([`Mask2FormerForUniversalSegmentation`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. 
mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If left to None, predictions will not be resized. return_coco_annotation (`bool`, *optional*, defaults to `False`): If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format. return_binary_maps (`bool`, *optional*, defaults to `False`): If set to `True`, segmentation maps are returned as a concatenated tensor of binary segmentation maps (one per detected instance). Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id` or `List[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to `True`. Set to `None` if no mask if found above `threshold`. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- An integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ if return_coco_annotation and return_binary_maps: raise ValueError("return_coco_annotation and return_binary_maps can not be both set to True.") # [batch_size, num_queries, num_classes+1] class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, height, width] masks_queries_logits = outputs.masks_queries_logits # Scale back to preprocessed image size - (384, 384) for all models masks_queries_logits = torch.nn.functional.interpolate( masks_queries_logits, size=(384, 384), mode="bilinear", align_corners=False ) device = masks_queries_logits.device num_classes = class_queries_logits.shape[-1] - 1 num_queries = class_queries_logits.shape[-2] # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(class_queries_logits.shape[0]): mask_pred = masks_queries_logits[i] mask_cls = class_queries_logits[i] scores = torch.nn.functional.softmax(mask_cls, dim=-1)[:, :-1] labels = torch.arange(num_classes, device=device).unsqueeze(0).repeat(num_queries, 1).flatten(0, 1) scores_per_image, topk_indices = scores.flatten(0, 1).topk(num_queries, sorted=False) labels_per_image = labels[topk_indices] topk_indices = torch_int_div(topk_indices, num_classes) mask_pred = mask_pred[topk_indices] pred_masks = (mask_pred > 0).float() # Calculate average mask prob mask_scores_per_image = (mask_pred.sigmoid().flatten(1) * pred_masks.flatten(1)).sum(1) / ( pred_masks.flatten(1).sum(1) + 1e-6 ) pred_scores = scores_per_image * mask_scores_per_image pred_classes = labels_per_image segmentation = torch.zeros((384, 384)) - 1 if target_sizes is not None: segmentation = torch.zeros(target_sizes[i]) - 1 pred_masks = torch.nn.functional.interpolate( pred_masks.unsqueeze(0), size=target_sizes[i], mode="nearest" )[0] instance_maps, segments = [], [] current_segment_id = 0 for j in range(num_queries): score = pred_scores[j].item() if not torch.all(pred_masks[j] == 0) and score >= threshold: segmentation[pred_masks[j] == 1] = 
current_segment_id segments.append( { "id": current_segment_id, "label_id": pred_classes[j].item(), "was_fused": False, "score": round(score, 6), } ) current_segment_id += 1 instance_maps.append(pred_masks[j]) # Return segmentation map in run-length encoding (RLE) format if return_coco_annotation: segmentation = convert_segmentation_to_rle(segmentation) # Return a concatenated tensor of binary instance maps if return_binary_maps and len(instance_maps) != 0: segmentation = torch.stack(instance_maps, dim=0) results.append({"segmentation": segmentation, "segments_info": segments}) return results def post_process_panoptic_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_sizes: Optional[List[Tuple[int, int]]] = None, ) -> List[Dict]: """ Converts the output of [`Mask2FormerForUniversalSegmentationOutput`] into image panoptic segmentation predictions. Only supports PyTorch. Args: outputs ([`Mask2FormerForUniversalSegmentationOutput`]): The outputs from [`Mask2FormerForUniversalSegmentation`]. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. label_ids_to_fuse (`Set[int]`, *optional*): The labels in this state will have all their instances be fused together. For instance we could say there can only be one sky in an image, but several persons, so the label ID for sky would be in that set, but not the one for person. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction in batch. If left to None, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set to `None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized to the corresponding `target_sizes` entry. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- an integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. Multiple instances of the same class / label were fused and assigned a single `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ if label_ids_to_fuse is None: logger.warning("`label_ids_to_fuse` unset. 
No instance will be fused.") label_ids_to_fuse = set() class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] # Scale back to preprocessed image size - (384, 384) for all models masks_queries_logits = torch.nn.functional.interpolate( masks_queries_logits, size=(384, 384), mode="bilinear", align_corners=False ) batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) # No mask found if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=label_ids_to_fuse, target_size=target_size, ) results.append({"segmentation": segmentation, "segments_info": segments}) return results
1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors with `//` is deprecated and has to be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/maskformer/image_processing_maskformer.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for MaskFormer.""" import math import warnings from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple, Union import numpy as np from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from transformers.image_transforms import ( PaddingMode, get_resize_output_image_size, normalize, pad, rescale, resize, to_channel_dimension_format, to_numpy_array, ) from transformers.image_utils import ( ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, make_list_of_images, valid_images, ) from transformers.utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, TensorType, is_torch_available, is_torch_tensor, logging, ) logger = logging.get_logger(__name__) if TYPE_CHECKING: from transformers import MaskFormerForInstanceSegmentationOutput if is_torch_available(): import torch from torch import nn # Copied from transformers.models.detr.image_processing_detr.max_across_indices def max_across_indices(values: Iterable[Any]) -> List[Any]: """ Return the maximum value across all indices of an iterable of values. """ return [max(values_i) for values_i in zip(*values)] # Copied from transformers.models.detr.image_processing_detr.get_max_height_width def get_max_height_width(images: List[np.ndarray]) -> List[int]: """ Get the maximum height and width across all images in a batch. """ input_channel_dimension = infer_channel_dimension_format(images[0]) if input_channel_dimension == ChannelDimension.FIRST: _, max_height, max_width = max_across_indices([img.shape for img in images]) elif input_channel_dimension == ChannelDimension.LAST: max_height, max_width, _ = max_across_indices([img.shape for img in images]) else: raise ValueError(f"Invalid channel dimension format: {input_channel_dimension}") return (max_height, max_width) # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int]) -> np.ndarray: """ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. Args: image (`np.ndarray`): Image to make the pixel mask for. output_size (`Tuple[int, int]`): Output size of the mask. """ input_height, input_width = get_image_size(image) mask = np.zeros(output_size, dtype=np.int64) mask[:input_height, :input_width] = 1 return mask # Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle def binary_mask_to_rle(mask): """ Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format. Args: mask (`torch.Tensor` or `numpy.array`): A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target segment_id or class_id. Returns: `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE format. 
""" if is_torch_tensor(mask): mask = mask.numpy() pixels = mask.flatten() pixels = np.concatenate([[0], pixels, [0]]) runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 runs[1::2] -= runs[::2] return list(runs) # Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle def convert_segmentation_to_rle(segmentation): """ Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format. Args: segmentation (`torch.Tensor` or `numpy.array`): A segmentation map of shape `(height, width)` where each value denotes a segment or class id. Returns: `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id. """ segment_ids = torch.unique(segmentation) run_length_encodings = [] for idx in segment_ids: mask = torch.where(segmentation == idx, 1, 0) rle = binary_mask_to_rle(mask) run_length_encodings.append(rle) return run_length_encodings # Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): """ Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and `labels`. Args: masks (`torch.Tensor`): A tensor of shape `(num_queries, height, width)`. scores (`torch.Tensor`): A tensor of shape `(num_queries)`. labels (`torch.Tensor`): A tensor of shape `(num_queries)`. object_mask_threshold (`float`): A number between 0 and 1 used to binarize the masks. Raises: `ValueError`: Raised when the first dimension doesn't match in all input tensors. Returns: `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region < `object_mask_threshold`. """ if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): raise ValueError("mask, scores and labels must have the same shape!") to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) return masks[to_keep], scores[to_keep], labels[to_keep] # Copied from transformers.models.detr.image_processing_detr.check_segment_validity def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8): # Get the mask associated with the k class mask_k = mask_labels == k mask_k_area = mask_k.sum() # Compute the area of all the stuff in query k original_area = (mask_probs[k] >= mask_threshold).sum() mask_exists = mask_k_area > 0 and original_area > 0 # Eliminate disconnected tiny segments if mask_exists: area_ratio = mask_k_area / original_area if not area_ratio.item() > overlap_mask_area_threshold: mask_exists = False return mask_exists, mask_k # Copied from transformers.models.detr.image_processing_detr.compute_segments def compute_segments( mask_probs, pred_scores, pred_labels, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_size: Tuple[int, int] = None, ): height = mask_probs.shape[1] if target_size is None else target_size[0] width = mask_probs.shape[2] if target_size is None else target_size[1] segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) segments: List[Dict] = [] if target_size is not None: mask_probs = nn.functional.interpolate( mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False )[0] current_segment_id = 0 # Weigh each mask by its prediction score mask_probs *= pred_scores.view(-1, 1, 1) mask_labels = mask_probs.argmax(0) # [height, width] # Keep track of 
instances of each class stuff_memory_list: Dict[str, int] = {} for k in range(pred_labels.shape[0]): pred_class = pred_labels[k].item() should_fuse = pred_class in label_ids_to_fuse # Check if mask exists and large enough to be a segment mask_exists, mask_k = check_segment_validity( mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold ) if mask_exists: if pred_class in stuff_memory_list: current_segment_id = stuff_memory_list[pred_class] else: current_segment_id += 1 # Add current object segment to final segmentation map segmentation[mask_k] = current_segment_id segment_score = round(pred_scores[k].item(), 6) segments.append( { "id": current_segment_id, "label_id": pred_class, "was_fused": should_fuse, "score": segment_score, } ) if should_fuse: stuff_memory_list[pred_class] = current_segment_id return segmentation, segments # TODO: (Amy) Move to image_transforms def convert_segmentation_map_to_binary_masks( segmentation_map: "np.ndarray", instance_id_to_semantic_id: Optional[Dict[int, int]] = None, ignore_index: Optional[int] = None, reduce_labels: bool = False, ): if reduce_labels and ignore_index is None: raise ValueError("If `reduce_labels` is True, `ignore_index` must be provided.") if reduce_labels: segmentation_map = np.where(segmentation_map == 0, ignore_index, segmentation_map - 1) # Get unique ids (class or instance ids based on input) all_labels = np.unique(segmentation_map) # Drop background label if applicable if ignore_index is not None: all_labels = all_labels[all_labels != ignore_index] # Generate a binary mask for each object instance binary_masks = [(segmentation_map == i) for i in all_labels] binary_masks = np.stack(binary_masks, axis=0) # (num_labels, height, width) # Convert instance ids to class ids if instance_id_to_semantic_id is not None: labels = np.zeros(all_labels.shape[0]) for label in all_labels: class_id = instance_id_to_semantic_id[label + 1 if reduce_labels else label] labels[all_labels == label] = class_id - 1 if reduce_labels else class_id else: labels = all_labels return binary_masks.astype(np.float32), labels.astype(np.int64) def get_maskformer_resize_output_image_size( image: np.ndarray, size: Union[int, Tuple[int, int], List[int], Tuple[int]], max_size: Optional[int] = None, size_divisor: int = 0, default_to_square: bool = True, ) -> tuple: """ Computes the output size given the desired size. Args: input_image (`np.ndarray`): The input image. size (`int`, `Tuple[int, int]`, `List[int]`, `Tuple[int]`): The size of the output image. default_to_square (`bool`, *optional*, defaults to `True`): Whether to default to square if no size is provided. max_size (`int`, *optional*): The maximum size of the output image. size_divisible (`int`, *optional*, defaults to `0`): If size_divisible is given, the output image size will be divisible by the number. Returns: `Tuple[int, int]`: The output size. """ output_size = get_resize_output_image_size( input_image=image, size=size, default_to_square=default_to_square, max_size=max_size ) if size_divisor > 0: height, width = output_size height = int(math.ceil(height / size_divisor) * size_divisor) width = int(math.ceil(width / size_divisor) * size_divisor) output_size = (height, width) return output_size class MaskFormerImageProcessor(BaseImageProcessor): r""" Constructs a MaskFormer image processor. The image processor can be used to prepare image(s) and optional targets for the model. This image processor inherits from [`BaseImageProcessor`] which contains most of the main methods. 
Users should refer to this superclass for more information regarding those methods. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the input to a certain `size`. size (`int`, *optional*, defaults to 800): Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of the image will be matched to this number. i.e, if `height > width`, then image will be rescaled to `(size * height / width, size)`. max_size (`int`, *optional*, defaults to 1333): The largest size an image dimension can have (otherwise it's capped). Only has an effect if `do_resize` is set to `True`. resample (`int`, *optional*, defaults to `PIL.Image.Resampling.BILINEAR`): An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`, `PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`, `PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set to `True`. size_divisor (`int`, *optional*, defaults to 32): Some backbones need images divisible by a certain number. If not passed, it defaults to the value used in Swin Transformer. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the input to a certain `scale`. rescale_factor (`float`, *optional*, defaults to 1/ 255): Rescale the input by the given factor. Only has an effect if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `True`): Whether or not to normalize the input with mean and standard deviation. image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`): The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean. image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`): The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the ImageNet std. ignore_index (`int`, *optional*): Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels denoted with 0 (background) will be replaced with `ignore_index`. do_reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to decrement all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by `ignore_index`. """ model_input_names = ["pixel_values", "pixel_mask"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, size_divisor: int = 32, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: float = 1 / 255, do_normalize: bool = True, image_mean: Union[float, List[float]] = None, image_std: Union[float, List[float]] = None, ignore_index: Optional[int] = None, do_reduce_labels: bool = False, **kwargs, ): if "size_divisibility" in kwargs: warnings.warn( "The `size_divisibility` argument is deprecated and will be removed in v4.27. Please use " "`size_divisor` instead.", FutureWarning, ) size_divisor = kwargs.pop("size_divisibility") if "max_size" in kwargs: warnings.warn( "The `max_size` argument is deprecated and will be removed in v4.27. 
Please use size['longest_edge']" " instead.", FutureWarning, ) # We make max_size a private attribute so we can pass it as a default value in the preprocess method whilst # `size` can still be pass in as an int self._max_size = kwargs.pop("max_size") else: self._max_size = 1333 if "reduce_labels" in kwargs: warnings.warn( "The `reduce_labels` argument is deprecated and will be removed in v4.27. Please use " "`do_reduce_labels` instead.", FutureWarning, ) do_reduce_labels = kwargs.pop("reduce_labels") size = size if size is not None else {"shortest_edge": 800, "longest_edge": self._max_size} size = get_size_dict(size, max_size=self._max_size, default_to_square=False) super().__init__(**kwargs) self.do_resize = do_resize self.size = size self.resample = resample self.size_divisor = size_divisor self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.ignore_index = ignore_index self.do_reduce_labels = do_reduce_labels @classmethod def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs): """ Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is created using from_dict and kwargs e.g. `MaskFormerImageProcessor.from_pretrained(checkpoint, max_size=800)` """ image_processor_dict = image_processor_dict.copy() if "max_size" in kwargs: image_processor_dict["max_size"] = kwargs.pop("max_size") if "size_divisibility" in kwargs: image_processor_dict["size_divisibility"] = kwargs.pop("size_divisibility") return super().from_dict(image_processor_dict, **kwargs) @property def size_divisibility(self): warnings.warn( "The `size_divisibility` property is deprecated and will be removed in v4.27. Please use " "`size_divisor` instead.", FutureWarning, ) return self.size_divisor @property def max_size(self): warnings.warn( "The `max_size` property is deprecated and will be removed in v4.27. Please use size['longest_edge']" " instead.", FutureWarning, ) return self.size["longest_edge"] @property def reduce_labels(self): warnings.warn( "The `reduce_labels` property is deprecated and will be removed in v4.27. Please use " "`do_reduce_labels` instead.", FutureWarning, ) return self.do_reduce_labels def resize( self, image: np.ndarray, size: Dict[str, int], size_divisor: int = 0, resample: PILImageResampling = PILImageResampling.BILINEAR, data_format=None, **kwargs, ) -> np.ndarray: """ Resize the image to the given size. Size can be min_size (scalar) or `(height, width)` tuple. If size is an int, smaller edge of the image will be matched to this number. """ if "max_size" in kwargs: warnings.warn( "The `max_size` parameter is deprecated and will be removed in v4.27. " "Please specify in `size['longest_edge'] instead`.", FutureWarning, ) max_size = kwargs.pop("max_size") else: max_size = None size = get_size_dict(size, max_size=max_size, default_to_square=False) if "shortest_edge" in size and "longest_edge" in size: size, max_size = size["shortest_edge"], size["longest_edge"] elif "height" in size and "width" in size: size = (size["height"], size["width"]) max_size = None else: raise ValueError( "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got" f" {size.keys()}." 
) size = get_maskformer_resize_output_image_size( image=image, size=size, max_size=max_size, size_divisor=size_divisor, default_to_square=False, ) image = resize(image, size=size, resample=resample, data_format=data_format) return image def rescale( self, image: np.ndarray, rescale_factor: float, data_format: Optional[ChannelDimension] = None ) -> np.ndarray: """ Rescale the image by the given factor. """ return rescale(image, rescale_factor, data_format=data_format) def normalize( self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Normalize the image with the given mean and standard deviation. """ return normalize(image, mean=mean, std=std, data_format=data_format) def convert_segmentation_map_to_binary_masks( self, segmentation_map: "np.ndarray", instance_id_to_semantic_id: Optional[Dict[int, int]] = None, ignore_index: Optional[int] = None, reduce_labels: bool = False, **kwargs, ): reduce_labels = reduce_labels if reduce_labels is not None else self.reduce_labels ignore_index = ignore_index if ignore_index is not None else self.ignore_index return convert_segmentation_map_to_binary_masks( segmentation_map=segmentation_map, instance_id_to_semantic_id=instance_id_to_semantic_id, ignore_index=ignore_index, reduce_labels=reduce_labels, ) def __call__(self, images, segmentation_maps=None, **kwargs) -> BatchFeature: return self.preprocess(images, segmentation_maps=segmentation_maps, **kwargs) def _preprocess( self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, size_divisor: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, ): if do_resize: image = self.resize(image, size=size, size_divisor=size_divisor, resample=resample) if do_rescale: image = self.rescale(image, rescale_factor=rescale_factor) if do_normalize: image = self.normalize(image, mean=image_mean, std=image_std) return image def _preprocess_image( self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, size_divisor: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """Preprocesses a single image.""" # All transformations expect numpy arrays. image = to_numpy_array(image) image = self._preprocess( image=image, do_resize=do_resize, size=size, size_divisor=size_divisor, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, ) if data_format is not None: image = to_channel_dimension_format(image, data_format) return image def _preprocess_mask( self, segmentation_map: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, size_divisor: int = 0, ) -> np.ndarray: """Preprocesses a single mask.""" segmentation_map = to_numpy_array(segmentation_map) # Add channel dimension if missing - needed for certain transformations added_channel_dim = False if segmentation_map.ndim == 2: added_channel_dim = True segmentation_map = segmentation_map[None, ...] 
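# Added note: the 2D segmentation map is given a dummy leading channel axis above so it can be run # through the shared `_preprocess` pipeline, which expects arrays with a channel dimension. Rescaling # and normalization are disabled and nearest-neighbour resampling is used below so that integer label # ids survive the resize; the extra axis is squeezed out again once preprocessing is done.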
# TODO: (Amy) # Rework segmentation map processing to include reducing labels and resizing which doesn't # drop segment IDs > 255. segmentation_map = self._preprocess( image=segmentation_map, do_resize=do_resize, resample=PILImageResampling.NEAREST, size=size, size_divisor=size_divisor, do_rescale=False, do_normalize=False, ) # Remove extra channel dimension if added for processing if added_channel_dim: segmentation_map = segmentation_map.squeeze(0) return segmentation_map def preprocess( self, images: ImageInput, segmentation_maps: Optional[ImageInput] = None, instance_id_to_semantic_id: Optional[Dict[int, int]] = None, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, size_divisor: Optional[int] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, ignore_index: Optional[int] = None, do_reduce_labels: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs, ) -> BatchFeature: if "pad_and_return_pixel_mask" in kwargs: warnings.warn( "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in v4.27", FutureWarning, ) if "reduce_labels" in kwargs: warnings.warn( "The `reduce_labels` argument is deprecated and will be removed in v4.27. Please use" " `do_reduce_labels` instead.", FutureWarning, ) if do_reduce_labels is not None: raise ValueError( "Cannot use both `reduce_labels` and `do_reduce_labels`. Please use `do_reduce_labels` instead." ) do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False, max_size=self._max_size) size_divisor = size_divisor if size_divisor is not None else self.size_divisor resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std ignore_index = ignore_index if ignore_index is not None else self.ignore_index do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels if do_resize is not None and size is None or size_divisor is None: raise ValueError("If `do_resize` is True, `size` and `size_divisor` must be provided.") if do_rescale is not None and rescale_factor is None: raise ValueError("If `do_rescale` is True, `rescale_factor` must be provided.") if do_normalize is not None and (image_mean is None or image_std is None): raise ValueError("If `do_normalize` is True, `image_mean` and `image_std` must be provided.") if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if segmentation_maps is not None and not valid_images(segmentation_maps): raise ValueError( "Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." 
) images = make_list_of_images(images) if segmentation_maps is not None: segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2) if segmentation_maps is not None and len(images) != len(segmentation_maps): raise ValueError("Images and segmentation maps must have the same length.") images = [ self._preprocess_image( image, do_resize=do_resize, size=size, size_divisor=size_divisor, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, ) for image in images ] if segmentation_maps is not None: segmentation_maps = [ self._preprocess_mask(segmentation_map, do_resize, size, size_divisor) for segmentation_map in segmentation_maps ] encoded_inputs = self.encode_inputs( images, segmentation_maps, instance_id_to_semantic_id, ignore_index, do_reduce_labels, return_tensors ) return encoded_inputs # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image def _pad_image( self, image: np.ndarray, output_size: Tuple[int, int], constant_values: Union[float, Iterable[float]] = 0, data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Pad an image with zeros to the given size. """ input_height, input_width = get_image_size(image) output_height, output_width = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad( image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format ) return padded_image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad def pad( self, images: List[np.ndarray], constant_values: Union[float, Iterable[float]] = 0, return_pixel_mask: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width in the batch and optionally returns their corresponding pixel mask. Args: image (`np.ndarray`): Image to pad. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether to return a pixel mask. input_channel_dimension (`ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be inferred from the input image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ pad_size = get_max_height_width(images) padded_images = [ self._pad_image(image, pad_size, constant_values=constant_values, data_format=data_format) for image in images ] data = {"pixel_values": padded_images} if return_pixel_mask: masks = [make_pixel_mask(image=image, output_size=pad_size) for image in images] data["pixel_mask"] = masks return BatchFeature(data=data, tensor_type=return_tensors) def encode_inputs( self, pixel_values_list: List[ImageInput], segmentation_maps: ImageInput = None, instance_id_to_semantic_id: Optional[Union[List[Dict[int, int]], Dict[int, int]]] = None, ignore_index: Optional[int] = None, reduce_labels: bool = False, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ): """ Pad images up to the largest image in a batch and create a corresponding `pixel_mask`. 
MaskFormer addresses semantic segmentation with a mask classification paradigm, thus input segmentation maps will be converted to lists of binary masks and their respective labels. Let's see an example, assuming `segmentation_maps = [[2,6,7,9]]`, the output will contain `mask_labels = [[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]` (four binary masks) and `class_labels = [2,6,7,9]`, the labels for each mask. Args: pixel_values_list (`List[ImageInput]`): List of images (pixel values) to be padded. Each image should be a tensor of shape `(channels, height, width)`. segmentation_maps (`ImageInput`, *optional*): The corresponding semantic segmentation maps with the pixel-wise annotations. (`bool`, *optional*, defaults to `True`): Whether or not to pad images up to the largest image in a batch and create a pixel mask. If left to the default, will return a pixel mask that is: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). instance_id_to_semantic_id (`List[Dict[int, int]]` or `Dict[int, int]`, *optional*): A mapping between object instance ids and class ids. If passed, `segmentation_maps` is treated as an instance segmentation map where each pixel represents an instance id. Can be provided as a single dictionary with a global/dataset-level mapping or as a list of dictionaries (one per image), to map instance ids in each image separately. return_tensors (`str` or [`~file_utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **pixel_values** -- Pixel values to be fed to a model. - **pixel_mask** -- Pixel mask to be fed to a model (when `=True` or if `pixel_mask` is in `self.model_input_names`). - **mask_labels** -- Optional list of mask labels of shape `(labels, height, width)` to be fed to a model (when `annotations` are provided). - **class_labels** -- Optional list of class labels of shape `(labels)` to be fed to a model (when `annotations` are provided). They identify the labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` if `class_labels[i][j]`. """ if "pad_and_return_pixel_mask" in kwargs: warnings.warn( "The `pad_and_return_pixel_mask` argument has no effect and will be removed in v4.27", FutureWarning ) ignore_index = self.ignore_index if ignore_index is None else ignore_index reduce_labels = self.do_reduce_labels if reduce_labels is None else reduce_labels pixel_values_list = [to_numpy_array(pixel_values) for pixel_values in pixel_values_list] encoded_inputs = self.pad(pixel_values_list, return_tensors=return_tensors) if segmentation_maps is not None: mask_labels = [] class_labels = [] pad_size = get_max_height_width(pixel_values_list) # Convert to list of binary masks and labels for idx, segmentation_map in enumerate(segmentation_maps): segmentation_map = to_numpy_array(segmentation_map) if isinstance(instance_id_to_semantic_id, list): instance_id = instance_id_to_semantic_id[idx] else: instance_id = instance_id_to_semantic_id # Use instance2class_id mapping per image masks, classes = self.convert_segmentation_map_to_binary_masks( segmentation_map, instance_id, ignore_index=ignore_index, reduce_labels=reduce_labels ) # We add an axis to make them compatible with the transformations library # this will be removed in the future masks = [mask[None, ...] 
for mask in masks] masks = [ self._pad_image(image=mask, output_size=pad_size, constant_values=ignore_index) for mask in masks ] masks = np.concatenate(masks, axis=0) mask_labels.append(torch.from_numpy(masks)) class_labels.append(torch.from_numpy(classes)) # we cannot batch them since they don't share a common class size encoded_inputs["mask_labels"] = mask_labels encoded_inputs["class_labels"] = class_labels return encoded_inputs def post_process_segmentation( self, outputs: "MaskFormerForInstanceSegmentationOutput", target_size: Tuple[int, int] = None ) -> "torch.Tensor": """ Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into image segmentation predictions. Only supports PyTorch. Args: outputs ([`MaskFormerForInstanceSegmentationOutput`]): The outputs from [`MaskFormerForInstanceSegmentation`]. target_size (`Tuple[int, int]`, *optional*): If set, the `masks_queries_logits` will be resized to `target_size`. Returns: `torch.Tensor`: A tensor of shape (`batch_size, num_class_labels, height, width`). """ logger.warning( "`post_process_segmentation` is deprecated and will be removed in v5 of Transformers, please use" " `post_process_instance_segmentation`", FutureWarning, ) # class_queries_logits has shape [BATCH, QUERIES, CLASSES + 1] class_queries_logits = outputs.class_queries_logits # masks_queries_logits has shape [BATCH, QUERIES, HEIGHT, WIDTH] masks_queries_logits = outputs.masks_queries_logits if target_size is not None: masks_queries_logits = torch.nn.functional.interpolate( masks_queries_logits, size=target_size, mode="bilinear", align_corners=False, ) # remove the null class `[..., :-1]` masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] # mask probs has shape [BATCH, QUERIES, HEIGHT, WIDTH] masks_probs = masks_queries_logits.sigmoid() # now we want to sum over the queries, # $ out_{c,h,w} = \sum_q p_{q,c} * m_{q,h,w} $ # where $ softmax(p) \in R^{q, c} $ is the mask classes # and $ sigmoid(m) \in R^{q, h, w}$ is the mask probabilities # b(atch)q(uery)c(lasses), b(atch)q(uery)h(eight)w(idth) segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) return segmentation def post_process_semantic_segmentation( self, outputs, target_sizes: Optional[List[Tuple[int, int]]] = None ) -> "torch.Tensor": """ Converts the output of [`MaskFormerForInstanceSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`MaskFormerForInstanceSegmentation`]): Raw outputs of the model. target_sizes (`List[Tuple[int, int]]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If left to None, predictions will not be resized. Returns: `List[torch.Tensor]`: A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id. 
""" class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] # Remove the null class `[..., :-1]` masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Semantic segmentation logits of shape (batch_size, num_classes, height, width) segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) batch_size = class_queries_logits.shape[0] # Resize logits and compute semantic segmentation maps if target_sizes is not None: if batch_size != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) semantic_segmentation = [] for idx in range(batch_size): resized_logits = torch.nn.functional.interpolate( segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False ) semantic_map = resized_logits[0].argmax(dim=0) semantic_segmentation.append(semantic_map) else: semantic_segmentation = segmentation.argmax(dim=1) semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation def post_process_instance_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, target_sizes: Optional[List[Tuple[int, int]]] = None, return_coco_annotation: Optional[bool] = False, return_binary_maps: Optional[bool] = False, ) -> List[Dict]: """ Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into instance segmentation predictions. Only supports PyTorch. Args: outputs ([`MaskFormerForInstanceSegmentation`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If left to None, predictions will not be resized. return_coco_annotation (`bool`, *optional*, defaults to `False`): If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format. return_binary_maps (`bool`, *optional*, defaults to `False`): If set to `True`, segmentation maps are returned as a concatenated tensor of binary segmentation maps (one per detected instance). Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id` or `List[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to `True`. Set to `None` if no mask if found above `threshold`. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- An integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **score** -- Prediction score of segment with `segment_id`. 
""" if return_coco_annotation and return_binary_maps: raise ValueError("return_coco_annotation and return_binary_maps can not be both set to True.") # [batch_size, num_queries, num_classes+1] class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, height, width] masks_queries_logits = outputs.masks_queries_logits device = masks_queries_logits.device num_classes = class_queries_logits.shape[-1] - 1 num_queries = class_queries_logits.shape[-2] # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(class_queries_logits.shape[0]): mask_pred = masks_queries_logits[i] mask_cls = class_queries_logits[i] scores = torch.nn.functional.softmax(mask_cls, dim=-1)[:, :-1] labels = torch.arange(num_classes, device=device).unsqueeze(0).repeat(num_queries, 1).flatten(0, 1) scores_per_image, topk_indices = scores.flatten(0, 1).topk(num_queries, sorted=False) labels_per_image = labels[topk_indices] topk_indices = topk_indices // num_classes mask_pred = mask_pred[topk_indices] pred_masks = (mask_pred > 0).float() # Calculate average mask prob mask_scores_per_image = (mask_pred.sigmoid().flatten(1) * pred_masks.flatten(1)).sum(1) / ( pred_masks.flatten(1).sum(1) + 1e-6 ) pred_scores = scores_per_image * mask_scores_per_image pred_classes = labels_per_image segmentation = torch.zeros(masks_queries_logits.shape[2:]) - 1 if target_sizes is not None: segmentation = torch.zeros(target_sizes[i]) - 1 pred_masks = torch.nn.functional.interpolate( pred_masks.unsqueeze(0), size=target_sizes[i], mode="nearest" )[0] instance_maps, segments = [], [] current_segment_id = 0 for j in range(num_queries): score = pred_scores[j].item() if not torch.all(pred_masks[j] == 0) and score >= threshold: segmentation[pred_masks[j] == 1] = current_segment_id segments.append( { "id": current_segment_id, "label_id": pred_classes[j].item(), "was_fused": False, "score": round(score, 6), } ) current_segment_id += 1 instance_maps.append(pred_masks[j]) # Return segmentation map in run-length encoding (RLE) format if return_coco_annotation: segmentation = convert_segmentation_to_rle(segmentation) # Return a concatenated tensor of binary instance maps if return_binary_maps and len(instance_maps) != 0: segmentation = torch.stack(instance_maps, dim=0) results.append({"segmentation": segmentation, "segments_info": segments}) return results def post_process_panoptic_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_sizes: Optional[List[Tuple[int, int]]] = None, ) -> List[Dict]: """ Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into image panoptic segmentation predictions. Only supports PyTorch. Args: outputs ([`MaskFormerForInstanceSegmentationOutput`]): The outputs from [`MaskFormerForInstanceSegmentation`]. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. label_ids_to_fuse (`Set[int]`, *optional*): The labels in this state will have all their instances be fused together. 
For instance we could say there can only be one sky in an image, but several persons, so the label ID for sky would be in that set, but not the one for person. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction in batch. If left to None, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set to `None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized to the corresponding `target_sizes` entry. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- an integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. Multiple instances of the same class / label were fused and assigned a single `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ if label_ids_to_fuse is None: logger.warning("`label_ids_to_fuse` unset. No instance will be fused.") label_ids_to_fuse = set() class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) # No mask found if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=label_ids_to_fuse, target_size=target_size, ) results.append({"segmentation": segmentation, "segments_info": segments}) return results
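Below is a minimal usage sketch (not part of the file above) showing how the image processor defined here is typically paired with `MaskFormerForInstanceSegmentation`; the checkpoint name and the sample image URL are illustrative assumptions, not values taken from this file.

import requests
import torch
from PIL import Image

from transformers import MaskFormerForInstanceSegmentation, MaskFormerImageProcessor

# Assumed checkpoint and image, for demonstration purposes only.
checkpoint = "facebook/maskformer-swin-base-ade"
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

processor = MaskFormerImageProcessor.from_pretrained(checkpoint)
model = MaskFormerForInstanceSegmentation.from_pretrained(checkpoint)

# Resize, rescale, normalize and pad the image; returns `pixel_values` (and `pixel_mask`).
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Collapse the per-query class and mask logits into a semantic map at the original resolution.
semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
print(semantic_map.shape)  # (height, width); each entry is a semantic class id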
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for MaskFormer.""" import math import warnings from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Set, Tuple, Union import numpy as np from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from transformers.image_transforms import ( PaddingMode, get_resize_output_image_size, normalize, pad, rescale, resize, to_channel_dimension_format, to_numpy_array, ) from transformers.image_utils import ( ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, make_list_of_images, valid_images, ) from transformers.utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, TensorType, is_torch_available, is_torch_tensor, logging, ) logger = logging.get_logger(__name__) if TYPE_CHECKING: from transformers import MaskFormerForInstanceSegmentationOutput if is_torch_available(): import torch from torch import nn from ...pytorch_utils import torch_int_div # Copied from transformers.models.detr.image_processing_detr.max_across_indices def max_across_indices(values: Iterable[Any]) -> List[Any]: """ Return the maximum value across all indices of an iterable of values. """ return [max(values_i) for values_i in zip(*values)] # Copied from transformers.models.detr.image_processing_detr.get_max_height_width def get_max_height_width(images: List[np.ndarray]) -> List[int]: """ Get the maximum height and width across all images in a batch. """ input_channel_dimension = infer_channel_dimension_format(images[0]) if input_channel_dimension == ChannelDimension.FIRST: _, max_height, max_width = max_across_indices([img.shape for img in images]) elif input_channel_dimension == ChannelDimension.LAST: max_height, max_width, _ = max_across_indices([img.shape for img in images]) else: raise ValueError(f"Invalid channel dimension format: {input_channel_dimension}") return (max_height, max_width) # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int]) -> np.ndarray: """ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. Args: image (`np.ndarray`): Image to make the pixel mask for. output_size (`Tuple[int, int]`): Output size of the mask. """ input_height, input_width = get_image_size(image) mask = np.zeros(output_size, dtype=np.int64) mask[:input_height, :input_width] = 1 return mask # Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle def binary_mask_to_rle(mask): """ Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format. Args: mask (`torch.Tensor` or `numpy.array`): A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target segment_id or class_id. Returns: `List`: Run-length encoded list of the binary mask. 
Refer to COCO API for more information about the RLE format. """ if is_torch_tensor(mask): mask = mask.numpy() pixels = mask.flatten() pixels = np.concatenate([[0], pixels, [0]]) runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 runs[1::2] -= runs[::2] return list(runs) # Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle def convert_segmentation_to_rle(segmentation): """ Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format. Args: segmentation (`torch.Tensor` or `numpy.array`): A segmentation map of shape `(height, width)` where each value denotes a segment or class id. Returns: `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id. """ segment_ids = torch.unique(segmentation) run_length_encodings = [] for idx in segment_ids: mask = torch.where(segmentation == idx, 1, 0) rle = binary_mask_to_rle(mask) run_length_encodings.append(rle) return run_length_encodings # Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): """ Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and `labels`. Args: masks (`torch.Tensor`): A tensor of shape `(num_queries, height, width)`. scores (`torch.Tensor`): A tensor of shape `(num_queries)`. labels (`torch.Tensor`): A tensor of shape `(num_queries)`. object_mask_threshold (`float`): A number between 0 and 1 used to binarize the masks. Raises: `ValueError`: Raised when the first dimension doesn't match in all input tensors. Returns: `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region < `object_mask_threshold`. 
""" if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): raise ValueError("mask, scores and labels must have the same shape!") to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) return masks[to_keep], scores[to_keep], labels[to_keep] # Copied from transformers.models.detr.image_processing_detr.check_segment_validity def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8): # Get the mask associated with the k class mask_k = mask_labels == k mask_k_area = mask_k.sum() # Compute the area of all the stuff in query k original_area = (mask_probs[k] >= mask_threshold).sum() mask_exists = mask_k_area > 0 and original_area > 0 # Eliminate disconnected tiny segments if mask_exists: area_ratio = mask_k_area / original_area if not area_ratio.item() > overlap_mask_area_threshold: mask_exists = False return mask_exists, mask_k # Copied from transformers.models.detr.image_processing_detr.compute_segments def compute_segments( mask_probs, pred_scores, pred_labels, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_size: Tuple[int, int] = None, ): height = mask_probs.shape[1] if target_size is None else target_size[0] width = mask_probs.shape[2] if target_size is None else target_size[1] segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) segments: List[Dict] = [] if target_size is not None: mask_probs = nn.functional.interpolate( mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False )[0] current_segment_id = 0 # Weigh each mask by its prediction score mask_probs *= pred_scores.view(-1, 1, 1) mask_labels = mask_probs.argmax(0) # [height, width] # Keep track of instances of each class stuff_memory_list: Dict[str, int] = {} for k in range(pred_labels.shape[0]): pred_class = pred_labels[k].item() should_fuse = pred_class in label_ids_to_fuse # Check if mask exists and large enough to be a segment mask_exists, mask_k = check_segment_validity( mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold ) if mask_exists: if pred_class in stuff_memory_list: current_segment_id = stuff_memory_list[pred_class] else: current_segment_id += 1 # Add current object segment to final segmentation map segmentation[mask_k] = current_segment_id segment_score = round(pred_scores[k].item(), 6) segments.append( { "id": current_segment_id, "label_id": pred_class, "was_fused": should_fuse, "score": segment_score, } ) if should_fuse: stuff_memory_list[pred_class] = current_segment_id return segmentation, segments # TODO: (Amy) Move to image_transforms def convert_segmentation_map_to_binary_masks( segmentation_map: "np.ndarray", instance_id_to_semantic_id: Optional[Dict[int, int]] = None, ignore_index: Optional[int] = None, reduce_labels: bool = False, ): if reduce_labels and ignore_index is None: raise ValueError("If `reduce_labels` is True, `ignore_index` must be provided.") if reduce_labels: segmentation_map = np.where(segmentation_map == 0, ignore_index, segmentation_map - 1) # Get unique ids (class or instance ids based on input) all_labels = np.unique(segmentation_map) # Drop background label if applicable if ignore_index is not None: all_labels = all_labels[all_labels != ignore_index] # Generate a binary mask for each object instance binary_masks = [(segmentation_map == i) for i in all_labels] binary_masks = np.stack(binary_masks, axis=0) # (num_labels, height, width) # Convert instance ids to class ids 
if instance_id_to_semantic_id is not None: labels = np.zeros(all_labels.shape[0]) for label in all_labels: class_id = instance_id_to_semantic_id[label + 1 if reduce_labels else label] labels[all_labels == label] = class_id - 1 if reduce_labels else class_id else: labels = all_labels return binary_masks.astype(np.float32), labels.astype(np.int64) def get_maskformer_resize_output_image_size( image: np.ndarray, size: Union[int, Tuple[int, int], List[int], Tuple[int]], max_size: Optional[int] = None, size_divisor: int = 0, default_to_square: bool = True, ) -> tuple: """ Computes the output size given the desired size. Args: input_image (`np.ndarray`): The input image. size (`int`, `Tuple[int, int]`, `List[int]`, `Tuple[int]`): The size of the output image. default_to_square (`bool`, *optional*, defaults to `True`): Whether to default to square if no size is provided. max_size (`int`, *optional*): The maximum size of the output image. size_divisible (`int`, *optional*, defaults to `0`): If size_divisible is given, the output image size will be divisible by the number. Returns: `Tuple[int, int]`: The output size. """ output_size = get_resize_output_image_size( input_image=image, size=size, default_to_square=default_to_square, max_size=max_size ) if size_divisor > 0: height, width = output_size height = int(math.ceil(height / size_divisor) * size_divisor) width = int(math.ceil(width / size_divisor) * size_divisor) output_size = (height, width) return output_size class MaskFormerImageProcessor(BaseImageProcessor): r""" Constructs a MaskFormer image processor. The image processor can be used to prepare image(s) and optional targets for the model. This image processor inherits from [`BaseImageProcessor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the input to a certain `size`. size (`int`, *optional*, defaults to 800): Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of the image will be matched to this number. i.e, if `height > width`, then image will be rescaled to `(size * height / width, size)`. max_size (`int`, *optional*, defaults to 1333): The largest size an image dimension can have (otherwise it's capped). Only has an effect if `do_resize` is set to `True`. resample (`int`, *optional*, defaults to `PIL.Image.Resampling.BILINEAR`): An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`, `PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`, `PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set to `True`. size_divisor (`int`, *optional*, defaults to 32): Some backbones need images divisible by a certain number. If not passed, it defaults to the value used in Swin Transformer. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the input to a certain `scale`. rescale_factor (`float`, *optional*, defaults to 1/ 255): Rescale the input by the given factor. Only has an effect if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `True`): Whether or not to normalize the input with mean and standard deviation. 
image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`): The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean. image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`): The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the ImageNet std. ignore_index (`int`, *optional*): Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels denoted with 0 (background) will be replaced with `ignore_index`. do_reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to decrement all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by `ignore_index`. """ model_input_names = ["pixel_values", "pixel_mask"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, size_divisor: int = 32, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: float = 1 / 255, do_normalize: bool = True, image_mean: Union[float, List[float]] = None, image_std: Union[float, List[float]] = None, ignore_index: Optional[int] = None, do_reduce_labels: bool = False, **kwargs, ): if "size_divisibility" in kwargs: warnings.warn( "The `size_divisibility` argument is deprecated and will be removed in v4.27. Please use " "`size_divisor` instead.", FutureWarning, ) size_divisor = kwargs.pop("size_divisibility") if "max_size" in kwargs: warnings.warn( "The `max_size` argument is deprecated and will be removed in v4.27. Please use size['longest_edge']" " instead.", FutureWarning, ) # We make max_size a private attribute so we can pass it as a default value in the preprocess method whilst # `size` can still be pass in as an int self._max_size = kwargs.pop("max_size") else: self._max_size = 1333 if "reduce_labels" in kwargs: warnings.warn( "The `reduce_labels` argument is deprecated and will be removed in v4.27. Please use " "`do_reduce_labels` instead.", FutureWarning, ) do_reduce_labels = kwargs.pop("reduce_labels") size = size if size is not None else {"shortest_edge": 800, "longest_edge": self._max_size} size = get_size_dict(size, max_size=self._max_size, default_to_square=False) super().__init__(**kwargs) self.do_resize = do_resize self.size = size self.resample = resample self.size_divisor = size_divisor self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.ignore_index = ignore_index self.do_reduce_labels = do_reduce_labels @classmethod def from_dict(cls, image_processor_dict: Dict[str, Any], **kwargs): """ Overrides the `from_dict` method from the base class to make sure parameters are updated if image processor is created using from_dict and kwargs e.g. 
`MaskFormerImageProcessor.from_pretrained(checkpoint, max_size=800)` """ image_processor_dict = image_processor_dict.copy() if "max_size" in kwargs: image_processor_dict["max_size"] = kwargs.pop("max_size") if "size_divisibility" in kwargs: image_processor_dict["size_divisibility"] = kwargs.pop("size_divisibility") return super().from_dict(image_processor_dict, **kwargs) @property def size_divisibility(self): warnings.warn( "The `size_divisibility` property is deprecated and will be removed in v4.27. Please use " "`size_divisor` instead.", FutureWarning, ) return self.size_divisor @property def max_size(self): warnings.warn( "The `max_size` property is deprecated and will be removed in v4.27. Please use size['longest_edge']" " instead.", FutureWarning, ) return self.size["longest_edge"] @property def reduce_labels(self): warnings.warn( "The `reduce_labels` property is deprecated and will be removed in v4.27. Please use " "`do_reduce_labels` instead.", FutureWarning, ) return self.do_reduce_labels def resize( self, image: np.ndarray, size: Dict[str, int], size_divisor: int = 0, resample: PILImageResampling = PILImageResampling.BILINEAR, data_format=None, **kwargs, ) -> np.ndarray: """ Resize the image to the given size. Size can be min_size (scalar) or `(height, width)` tuple. If size is an int, smaller edge of the image will be matched to this number. """ if "max_size" in kwargs: warnings.warn( "The `max_size` parameter is deprecated and will be removed in v4.27. " "Please specify in `size['longest_edge'] instead`.", FutureWarning, ) max_size = kwargs.pop("max_size") else: max_size = None size = get_size_dict(size, max_size=max_size, default_to_square=False) if "shortest_edge" in size and "longest_edge" in size: size, max_size = size["shortest_edge"], size["longest_edge"] elif "height" in size and "width" in size: size = (size["height"], size["width"]) max_size = None else: raise ValueError( "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got" f" {size.keys()}." ) size = get_maskformer_resize_output_image_size( image=image, size=size, max_size=max_size, size_divisor=size_divisor, default_to_square=False, ) image = resize(image, size=size, resample=resample, data_format=data_format) return image def rescale( self, image: np.ndarray, rescale_factor: float, data_format: Optional[ChannelDimension] = None ) -> np.ndarray: """ Rescale the image by the given factor. """ return rescale(image, rescale_factor, data_format=data_format) def normalize( self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Normalize the image with the given mean and standard deviation. 
""" return normalize(image, mean=mean, std=std, data_format=data_format) def convert_segmentation_map_to_binary_masks( self, segmentation_map: "np.ndarray", instance_id_to_semantic_id: Optional[Dict[int, int]] = None, ignore_index: Optional[int] = None, reduce_labels: bool = False, **kwargs, ): reduce_labels = reduce_labels if reduce_labels is not None else self.reduce_labels ignore_index = ignore_index if ignore_index is not None else self.ignore_index return convert_segmentation_map_to_binary_masks( segmentation_map=segmentation_map, instance_id_to_semantic_id=instance_id_to_semantic_id, ignore_index=ignore_index, reduce_labels=reduce_labels, ) def __call__(self, images, segmentation_maps=None, **kwargs) -> BatchFeature: return self.preprocess(images, segmentation_maps=segmentation_maps, **kwargs) def _preprocess( self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, size_divisor: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, ): if do_resize: image = self.resize(image, size=size, size_divisor=size_divisor, resample=resample) if do_rescale: image = self.rescale(image, rescale_factor=rescale_factor) if do_normalize: image = self.normalize(image, mean=image_mean, std=image_std) return image def _preprocess_image( self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, size_divisor: int = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """Preprocesses a single image.""" # All transformations expect numpy arrays. image = to_numpy_array(image) image = self._preprocess( image=image, do_resize=do_resize, size=size, size_divisor=size_divisor, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, ) if data_format is not None: image = to_channel_dimension_format(image, data_format) return image def _preprocess_mask( self, segmentation_map: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, size_divisor: int = 0, ) -> np.ndarray: """Preprocesses a single mask.""" segmentation_map = to_numpy_array(segmentation_map) # Add channel dimension if missing - needed for certain transformations added_channel_dim = False if segmentation_map.ndim == 2: added_channel_dim = True segmentation_map = segmentation_map[None, ...] # TODO: (Amy) # Remork segmentation map processing to include reducing labels and resizing which doesn't # drop segment IDs > 255. 
segmentation_map = self._preprocess( image=segmentation_map, do_resize=do_resize, resample=PILImageResampling.NEAREST, size=size, size_divisor=size_divisor, do_rescale=False, do_normalize=False, ) # Remove extra channel dimension if added for processing if added_channel_dim: segmentation_map = segmentation_map.squeeze(0) return segmentation_map def preprocess( self, images: ImageInput, segmentation_maps: Optional[ImageInput] = None, instance_id_to_semantic_id: Optional[Dict[int, int]] = None, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, size_divisor: Optional[int] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, ignore_index: Optional[int] = None, do_reduce_labels: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs, ) -> BatchFeature: if "pad_and_return_pixel_mask" in kwargs: warnings.warn( "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in v4.27", FutureWarning, ) if "reduce_labels" in kwargs: warnings.warn( "The `reduce_labels` argument is deprecated and will be removed in v4.27. Please use" " `do_reduce_labels` instead.", FutureWarning, ) if do_reduce_labels is not None: raise ValueError( "Cannot use both `reduce_labels` and `do_reduce_labels`. Please use `do_reduce_labels` instead." ) do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False, max_size=self._max_size) size_divisor = size_divisor if size_divisor is not None else self.size_divisor resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std ignore_index = ignore_index if ignore_index is not None else self.ignore_index do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels if do_resize is not None and size is None or size_divisor is None: raise ValueError("If `do_resize` is True, `size` and `size_divisor` must be provided.") if do_rescale is not None and rescale_factor is None: raise ValueError("If `do_rescale` is True, `rescale_factor` must be provided.") if do_normalize is not None and (image_mean is None or image_std is None): raise ValueError("If `do_normalize` is True, `image_mean` and `image_std` must be provided.") if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if segmentation_maps is not None and not valid_images(segmentation_maps): raise ValueError( "Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." 
) images = make_list_of_images(images) if segmentation_maps is not None: segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2) if segmentation_maps is not None and len(images) != len(segmentation_maps): raise ValueError("Images and segmentation maps must have the same length.") images = [ self._preprocess_image( image, do_resize=do_resize, size=size, size_divisor=size_divisor, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, ) for image in images ] if segmentation_maps is not None: segmentation_maps = [ self._preprocess_mask(segmentation_map, do_resize, size, size_divisor) for segmentation_map in segmentation_maps ] encoded_inputs = self.encode_inputs( images, segmentation_maps, instance_id_to_semantic_id, ignore_index, do_reduce_labels, return_tensors ) return encoded_inputs # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image def _pad_image( self, image: np.ndarray, output_size: Tuple[int, int], constant_values: Union[float, Iterable[float]] = 0, data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Pad an image with zeros to the given size. """ input_height, input_width = get_image_size(image) output_height, output_width = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad( image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format ) return padded_image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad def pad( self, images: List[np.ndarray], constant_values: Union[float, Iterable[float]] = 0, return_pixel_mask: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width in the batch and optionally returns their corresponding pixel mask. Args: image (`np.ndarray`): Image to pad. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether to return a pixel mask. input_channel_dimension (`ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be inferred from the input image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ pad_size = get_max_height_width(images) padded_images = [ self._pad_image(image, pad_size, constant_values=constant_values, data_format=data_format) for image in images ] data = {"pixel_values": padded_images} if return_pixel_mask: masks = [make_pixel_mask(image=image, output_size=pad_size) for image in images] data["pixel_mask"] = masks return BatchFeature(data=data, tensor_type=return_tensors) def encode_inputs( self, pixel_values_list: List[ImageInput], segmentation_maps: ImageInput = None, instance_id_to_semantic_id: Optional[Union[List[Dict[int, int]], Dict[int, int]]] = None, ignore_index: Optional[int] = None, reduce_labels: bool = False, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ): """ Pad images up to the largest image in a batch and create a corresponding `pixel_mask`. 
MaskFormer addresses semantic segmentation with a mask classification paradigm, thus input segmentation maps will be converted to lists of binary masks and their respective labels. Let's see an example, assuming `segmentation_maps = [[2,6,7,9]]`, the output will contain `mask_labels = [[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]` (four binary masks) and `class_labels = [2,6,7,9]`, the labels for each mask. Args: pixel_values_list (`List[ImageInput]`): List of images (pixel values) to be padded. Each image should be a tensor of shape `(channels, height, width)`. segmentation_maps (`ImageInput`, *optional*): The corresponding semantic segmentation maps with the pixel-wise annotations. (`bool`, *optional*, defaults to `True`): Whether or not to pad images up to the largest image in a batch and create a pixel mask. If left to the default, will return a pixel mask that is: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). instance_id_to_semantic_id (`List[Dict[int, int]]` or `Dict[int, int]`, *optional*): A mapping between object instance ids and class ids. If passed, `segmentation_maps` is treated as an instance segmentation map where each pixel represents an instance id. Can be provided as a single dictionary with a global/dataset-level mapping or as a list of dictionaries (one per image), to map instance ids in each image separately. return_tensors (`str` or [`~file_utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **pixel_values** -- Pixel values to be fed to a model. - **pixel_mask** -- Pixel mask to be fed to a model (when `=True` or if `pixel_mask` is in `self.model_input_names`). - **mask_labels** -- Optional list of mask labels of shape `(labels, height, width)` to be fed to a model (when `annotations` are provided). - **class_labels** -- Optional list of class labels of shape `(labels)` to be fed to a model (when `annotations` are provided). They identify the labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` if `class_labels[i][j]`. """ if "pad_and_return_pixel_mask" in kwargs: warnings.warn( "The `pad_and_return_pixel_mask` argument has no effect and will be removed in v4.27", FutureWarning ) ignore_index = self.ignore_index if ignore_index is None else ignore_index reduce_labels = self.do_reduce_labels if reduce_labels is None else reduce_labels pixel_values_list = [to_numpy_array(pixel_values) for pixel_values in pixel_values_list] encoded_inputs = self.pad(pixel_values_list, return_tensors=return_tensors) if segmentation_maps is not None: mask_labels = [] class_labels = [] pad_size = get_max_height_width(pixel_values_list) # Convert to list of binary masks and labels for idx, segmentation_map in enumerate(segmentation_maps): segmentation_map = to_numpy_array(segmentation_map) if isinstance(instance_id_to_semantic_id, list): instance_id = instance_id_to_semantic_id[idx] else: instance_id = instance_id_to_semantic_id # Use instance2class_id mapping per image masks, classes = self.convert_segmentation_map_to_binary_masks( segmentation_map, instance_id, ignore_index=ignore_index, reduce_labels=reduce_labels ) # We add an axis to make them compatible with the transformations library # this will be removed in the future masks = [mask[None, ...] 
for mask in masks] masks = [ self._pad_image(image=mask, output_size=pad_size, constant_values=ignore_index) for mask in masks ] masks = np.concatenate(masks, axis=0) mask_labels.append(torch.from_numpy(masks)) class_labels.append(torch.from_numpy(classes)) # we cannot batch them since they don't share a common class size encoded_inputs["mask_labels"] = mask_labels encoded_inputs["class_labels"] = class_labels return encoded_inputs def post_process_segmentation( self, outputs: "MaskFormerForInstanceSegmentationOutput", target_size: Tuple[int, int] = None ) -> "torch.Tensor": """ Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into image segmentation predictions. Only supports PyTorch. Args: outputs ([`MaskFormerForInstanceSegmentationOutput`]): The outputs from [`MaskFormerForInstanceSegmentation`]. target_size (`Tuple[int, int]`, *optional*): If set, the `masks_queries_logits` will be resized to `target_size`. Returns: `torch.Tensor`: A tensor of shape (`batch_size, num_class_labels, height, width`). """ logger.warning( "`post_process_segmentation` is deprecated and will be removed in v5 of Transformers, please use" " `post_process_instance_segmentation`", FutureWarning, ) # class_queries_logits has shape [BATCH, QUERIES, CLASSES + 1] class_queries_logits = outputs.class_queries_logits # masks_queries_logits has shape [BATCH, QUERIES, HEIGHT, WIDTH] masks_queries_logits = outputs.masks_queries_logits if target_size is not None: masks_queries_logits = torch.nn.functional.interpolate( masks_queries_logits, size=target_size, mode="bilinear", align_corners=False, ) # remove the null class `[..., :-1]` masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] # mask probs has shape [BATCH, QUERIES, HEIGHT, WIDTH] masks_probs = masks_queries_logits.sigmoid() # now we want to sum over the queries, # $ out_{c,h,w} = \sum_q p_{q,c} * m_{q,h,w} $ # where $ softmax(p) \in R^{q, c} $ is the mask classes # and $ sigmoid(m) \in R^{q, h, w}$ is the mask probabilities # b(atch)q(uery)c(lasses), b(atch)q(uery)h(eight)w(idth) segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) return segmentation def post_process_semantic_segmentation( self, outputs, target_sizes: Optional[List[Tuple[int, int]]] = None ) -> "torch.Tensor": """ Converts the output of [`MaskFormerForInstanceSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`MaskFormerForInstanceSegmentation`]): Raw outputs of the model. target_sizes (`List[Tuple[int, int]]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If left to None, predictions will not be resized. Returns: `List[torch.Tensor]`: A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id. 
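        A minimal usage sketch (the checkpoint name and example image URL are illustrative assumptions, not part of this method):

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import MaskFormerImageProcessor, MaskFormerForInstanceSegmentation

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> image_processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-base-ade")
        >>> model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-base-ade")

        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> outputs = model(**inputs)

        >>> # one (height, width) tensor of semantic class ids per input image
        >>> semantic_map = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]
        ```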
""" class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] # Remove the null class `[..., :-1]` masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Semantic segmentation logits of shape (batch_size, num_classes, height, width) segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) batch_size = class_queries_logits.shape[0] # Resize logits and compute semantic segmentation maps if target_sizes is not None: if batch_size != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) semantic_segmentation = [] for idx in range(batch_size): resized_logits = torch.nn.functional.interpolate( segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False ) semantic_map = resized_logits[0].argmax(dim=0) semantic_segmentation.append(semantic_map) else: semantic_segmentation = segmentation.argmax(dim=1) semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation def post_process_instance_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, target_sizes: Optional[List[Tuple[int, int]]] = None, return_coco_annotation: Optional[bool] = False, return_binary_maps: Optional[bool] = False, ) -> List[Dict]: """ Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into instance segmentation predictions. Only supports PyTorch. Args: outputs ([`MaskFormerForInstanceSegmentation`]): Raw outputs of the model. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If left to None, predictions will not be resized. return_coco_annotation (`bool`, *optional*, defaults to `False`): If set to `True`, segmentation maps are returned in COCO run-length encoding (RLE) format. return_binary_maps (`bool`, *optional*, defaults to `False`): If set to `True`, segmentation maps are returned as a concatenated tensor of binary segmentation maps (one per detected instance). Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- A tensor of shape `(height, width)` where each pixel represents a `segment_id` or `List[List]` run-length encoding (RLE) of the segmentation map if return_coco_annotation is set to `True`. Set to `None` if no mask if found above `threshold`. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- An integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **score** -- Prediction score of segment with `segment_id`. 
""" if return_coco_annotation and return_binary_maps: raise ValueError("return_coco_annotation and return_binary_maps can not be both set to True.") # [batch_size, num_queries, num_classes+1] class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, height, width] masks_queries_logits = outputs.masks_queries_logits device = masks_queries_logits.device num_classes = class_queries_logits.shape[-1] - 1 num_queries = class_queries_logits.shape[-2] # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(class_queries_logits.shape[0]): mask_pred = masks_queries_logits[i] mask_cls = class_queries_logits[i] scores = torch.nn.functional.softmax(mask_cls, dim=-1)[:, :-1] labels = torch.arange(num_classes, device=device).unsqueeze(0).repeat(num_queries, 1).flatten(0, 1) scores_per_image, topk_indices = scores.flatten(0, 1).topk(num_queries, sorted=False) labels_per_image = labels[topk_indices] topk_indices = torch_int_div(topk_indices, num_classes) mask_pred = mask_pred[topk_indices] pred_masks = (mask_pred > 0).float() # Calculate average mask prob mask_scores_per_image = (mask_pred.sigmoid().flatten(1) * pred_masks.flatten(1)).sum(1) / ( pred_masks.flatten(1).sum(1) + 1e-6 ) pred_scores = scores_per_image * mask_scores_per_image pred_classes = labels_per_image segmentation = torch.zeros(masks_queries_logits.shape[2:]) - 1 if target_sizes is not None: segmentation = torch.zeros(target_sizes[i]) - 1 pred_masks = torch.nn.functional.interpolate( pred_masks.unsqueeze(0), size=target_sizes[i], mode="nearest" )[0] instance_maps, segments = [], [] current_segment_id = 0 for j in range(num_queries): score = pred_scores[j].item() if not torch.all(pred_masks[j] == 0) and score >= threshold: segmentation[pred_masks[j] == 1] = current_segment_id segments.append( { "id": current_segment_id, "label_id": pred_classes[j].item(), "was_fused": False, "score": round(score, 6), } ) current_segment_id += 1 instance_maps.append(pred_masks[j]) # Return segmentation map in run-length encoding (RLE) format if return_coco_annotation: segmentation = convert_segmentation_to_rle(segmentation) # Return a concatenated tensor of binary instance maps if return_binary_maps and len(instance_maps) != 0: segmentation = torch.stack(instance_maps, dim=0) results.append({"segmentation": segmentation, "segments_info": segments}) return results def post_process_panoptic_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_sizes: Optional[List[Tuple[int, int]]] = None, ) -> List[Dict]: """ Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into image panoptic segmentation predictions. Only supports PyTorch. Args: outputs ([`MaskFormerForInstanceSegmentationOutput`]): The outputs from [`MaskFormerForInstanceSegmentation`]. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. label_ids_to_fuse (`Set[int]`, *optional*): The labels in this state will have all their instances be fused together. 
For instance we could say there can only be one sky in an image, but several persons, so the label ID for sky would be in that set, but not the one for person. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction in batch. If left to None, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set to `None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized to the corresponding `target_sizes` entry. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- an integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. Multiple instances of the same class / label were fused and assigned a single `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ if label_ids_to_fuse is None: logger.warning("`label_ids_to_fuse` unset. No instance will be fused.") label_ids_to_fuse = set() class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) # No mask found if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=label_ids_to_fuse, target_size=target_size, ) results.append({"segmentation": segmentation, "segments_info": segments}) return results
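# A minimal usage sketch for the panoptic post-processing above (illustrative only: the checkpoint
# name, the `image` variable and the target size are assumptions, not part of this module):
#
#   processor = MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-base-coco")
#   model = MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-base-coco")
#   outputs = model(**processor(images=image, return_tensors="pt"))
#   panoptic = processor.post_process_panoptic_segmentation(outputs, target_sizes=[(480, 640)])[0]
#   # panoptic["segmentation"] maps every pixel to a segment id; panoptic["segments_info"] holds
#   # the label_id, score and was_fused flag of each segment.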
1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/oneformer/image_processing_oneformer.py
# coding=utf-8 # Copyright 2022 SHI Labs and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for OneFormer.""" import json import warnings from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import numpy as np from huggingface_hub import hf_hub_download from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from transformers.image_transforms import ( PaddingMode, get_resize_output_image_size, normalize, pad, rescale, resize, to_channel_dimension_format, to_numpy_array, ) from transformers.image_utils import ( ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, make_list_of_images, valid_images, ) from transformers.utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, TensorType, is_torch_available, is_torch_tensor, logging, ) logger = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn # Copied from transformers.models.detr.image_processing_detr.max_across_indices def max_across_indices(values: Iterable[Any]) -> List[Any]: """ Return the maximum value across all indices of an iterable of values. """ return [max(values_i) for values_i in zip(*values)] # Copied from transformers.models.detr.image_processing_detr.get_max_height_width def get_max_height_width(images: List[np.ndarray]) -> List[int]: """ Get the maximum height and width across all images in a batch. """ input_channel_dimension = infer_channel_dimension_format(images[0]) if input_channel_dimension == ChannelDimension.FIRST: _, max_height, max_width = max_across_indices([img.shape for img in images]) elif input_channel_dimension == ChannelDimension.LAST: max_height, max_width, _ = max_across_indices([img.shape for img in images]) else: raise ValueError(f"Invalid channel dimension format: {input_channel_dimension}") return (max_height, max_width) # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int]) -> np.ndarray: """ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. Args: image (`np.ndarray`): Image to make the pixel mask for. output_size (`Tuple[int, int]`): Output size of the mask. """ input_height, input_width = get_image_size(image) mask = np.zeros(output_size, dtype=np.int64) mask[:input_height, :input_width] = 1 return mask # Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle def binary_mask_to_rle(mask): """ Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format. Args: mask (`torch.Tensor` or `numpy.array`): A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target segment_id or class_id. Returns: `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE format. 
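    A tiny worked example (the mask values are illustrative; the output follows from the encoding below):

    ```python
    >>> import numpy as np
    >>> mask = np.array([[0, 1, 1], [0, 1, 0]])
    >>> binary_mask_to_rle(mask)  # [2, 2, 5, 1]: a run of 2 ones starting at flat position 2 (1-indexed), then a run of 1 at position 5
    ```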
""" if is_torch_tensor(mask): mask = mask.numpy() pixels = mask.flatten() pixels = np.concatenate([[0], pixels, [0]]) runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 runs[1::2] -= runs[::2] return list(runs) # Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle def convert_segmentation_to_rle(segmentation): """ Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format. Args: segmentation (`torch.Tensor` or `numpy.array`): A segmentation map of shape `(height, width)` where each value denotes a segment or class id. Returns: `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id. """ segment_ids = torch.unique(segmentation) run_length_encodings = [] for idx in segment_ids: mask = torch.where(segmentation == idx, 1, 0) rle = binary_mask_to_rle(mask) run_length_encodings.append(rle) return run_length_encodings # Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): """ Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and `labels`. Args: masks (`torch.Tensor`): A tensor of shape `(num_queries, height, width)`. scores (`torch.Tensor`): A tensor of shape `(num_queries)`. labels (`torch.Tensor`): A tensor of shape `(num_queries)`. object_mask_threshold (`float`): A number between 0 and 1 used to binarize the masks. Raises: `ValueError`: Raised when the first dimension doesn't match in all input tensors. Returns: `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region < `object_mask_threshold`. """ if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): raise ValueError("mask, scores and labels must have the same shape!") to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) return masks[to_keep], scores[to_keep], labels[to_keep] # Copied from transformers.models.detr.image_processing_detr.check_segment_validity def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8): # Get the mask associated with the k class mask_k = mask_labels == k mask_k_area = mask_k.sum() # Compute the area of all the stuff in query k original_area = (mask_probs[k] >= mask_threshold).sum() mask_exists = mask_k_area > 0 and original_area > 0 # Eliminate disconnected tiny segments if mask_exists: area_ratio = mask_k_area / original_area if not area_ratio.item() > overlap_mask_area_threshold: mask_exists = False return mask_exists, mask_k # Copied from transformers.models.detr.image_processing_detr.compute_segments def compute_segments( mask_probs, pred_scores, pred_labels, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_size: Tuple[int, int] = None, ): height = mask_probs.shape[1] if target_size is None else target_size[0] width = mask_probs.shape[2] if target_size is None else target_size[1] segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) segments: List[Dict] = [] if target_size is not None: mask_probs = nn.functional.interpolate( mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False )[0] current_segment_id = 0 # Weigh each mask by its prediction score mask_probs *= pred_scores.view(-1, 1, 1) mask_labels = mask_probs.argmax(0) # [height, width] # Keep track of 
instances of each class stuff_memory_list: Dict[str, int] = {} for k in range(pred_labels.shape[0]): pred_class = pred_labels[k].item() should_fuse = pred_class in label_ids_to_fuse # Check if mask exists and large enough to be a segment mask_exists, mask_k = check_segment_validity( mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold ) if mask_exists: if pred_class in stuff_memory_list: current_segment_id = stuff_memory_list[pred_class] else: current_segment_id += 1 # Add current object segment to final segmentation map segmentation[mask_k] = current_segment_id segment_score = round(pred_scores[k].item(), 6) segments.append( { "id": current_segment_id, "label_id": pred_class, "was_fused": should_fuse, "score": segment_score, } ) if should_fuse: stuff_memory_list[pred_class] = current_segment_id return segmentation, segments # Copied from transformers.models.maskformer.image_processing_maskformer.convert_segmentation_map_to_binary_masks def convert_segmentation_map_to_binary_masks( segmentation_map: "np.ndarray", instance_id_to_semantic_id: Optional[Dict[int, int]] = None, ignore_index: Optional[int] = None, reduce_labels: bool = False, ): if reduce_labels and ignore_index is None: raise ValueError("If `reduce_labels` is True, `ignore_index` must be provided.") if reduce_labels: segmentation_map = np.where(segmentation_map == 0, ignore_index, segmentation_map - 1) # Get unique ids (class or instance ids based on input) all_labels = np.unique(segmentation_map) # Drop background label if applicable if ignore_index is not None: all_labels = all_labels[all_labels != ignore_index] # Generate a binary mask for each object instance binary_masks = [(segmentation_map == i) for i in all_labels] binary_masks = np.stack(binary_masks, axis=0) # (num_labels, height, width) # Convert instance ids to class ids if instance_id_to_semantic_id is not None: labels = np.zeros(all_labels.shape[0]) for label in all_labels: class_id = instance_id_to_semantic_id[label + 1 if reduce_labels else label] labels[all_labels == label] = class_id - 1 if reduce_labels else class_id else: labels = all_labels return binary_masks.astype(np.float32), labels.astype(np.int64) def get_oneformer_resize_output_image_size( image: np.ndarray, size: Union[int, Tuple[int, int], List[int], Tuple[int]], max_size: Optional[int] = None, default_to_square: bool = True, ) -> tuple: """ Computes the output size given the desired size. Args: input_image (`np.ndarray`): The input image. size (`int`, `Tuple[int, int]`, `List[int]`, `Tuple[int]`): The size of the output image. default_to_square (`bool`, *optional*, defaults to `True`): Whether to default to square if no size is provided. max_size (`int`, *optional*): The maximum size of the output image. Returns: `Tuple[int, int]`: The output size. """ output_size = get_resize_output_image_size( input_image=image, size=size, default_to_square=default_to_square, max_size=max_size ) return output_size def prepare_metadata(repo_path, class_info_file): with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f: class_info = json.load(f) metadata = {} class_names = [] thing_ids = [] for key, info in class_info.items(): metadata[key] = info["name"] class_names.append(info["name"]) if info["isthing"]: thing_ids.append(int(key)) metadata["thing_ids"] = thing_ids metadata["class_names"] = class_names return metadata class OneFormerImageProcessor(BaseImageProcessor): r""" Constructs a OneFormer image processor. 
The image processor can be used to prepare image(s), task input(s) and optional text inputs and targets for the model. This image processor inherits from [`BaseImageProcessor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the input to a certain `size`. size (`int`, *optional*, defaults to 800): Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of the image will be matched to this number. i.e, if `height > width`, then image will be rescaled to `(size * height / width, size)`. max_size (`int`, *optional*, defaults to 1333): The largest size an image dimension can have (otherwise it's capped). Only has an effect if `do_resize` is set to `True`. resample (`int`, *optional*, defaults to `PIL.Image.Resampling.BILINEAR`): An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`, `PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`, `PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the input to a certain `scale`. rescale_factor (`float`, *optional*, defaults to 1/ 255): Rescale the input by the given factor. Only has an effect if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `True`): Whether or not to normalize the input with mean and standard deviation. image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`): The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean. image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`): The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the ImageNet std. ignore_index (`int`, *optional*): Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels denoted with 0 (background) will be replaced with `ignore_index`. do_reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to decrement all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by `ignore_index`. repo_path (`str`, defaults to `shi-labs/oneformer_demo`): Dataset repository on huggingface hub containing the JSON file with class information for the dataset. class_info_file (`str`): JSON file containing class information for the dataset. It is stored inside on the `repo_path` dataset repository. num_text (`int`, *optional*): Number of text entries in the text input list. 
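    A minimal construction sketch (the checkpoint name and the `image` variable are illustrative assumptions):

    ```python
    >>> from transformers import OneFormerImageProcessor

    >>> image_processor = OneFormerImageProcessor.from_pretrained("shi-labs/oneformer_ade20k_swin_tiny")
    >>> inputs = image_processor(images=image, task_inputs=["semantic"], return_tensors="pt")
    >>> # `inputs` holds `pixel_values`, `pixel_mask` and the raw `task_inputs` strings; in practice
    >>> # [`OneFormerProcessor`] wraps this class and additionally tokenizes the task inputs.
    ```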
""" model_input_names = ["pixel_values", "pixel_mask", "task_inputs"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: float = 1 / 255, do_normalize: bool = True, image_mean: Union[float, List[float]] = None, image_std: Union[float, List[float]] = None, ignore_index: Optional[int] = None, do_reduce_labels: bool = False, repo_path: str = "shi-labs/oneformer_demo", class_info_file: str = None, num_text: Optional[int] = None, **kwargs, ): if "max_size" in kwargs: self._max_size = kwargs.pop("max_size") else: self._max_size = 1333 size = size if size is not None else {"shortest_edge": 800, "longest_edge": self._max_size} size = get_size_dict(size, max_size=self._max_size, default_to_square=False) if "reduce_labels" in kwargs: warnings.warn( "The `reduce_labels` argument is deprecated and will be removed in v4.27. " "Please use `do_reduce_labels` instead.", FutureWarning, ) do_reduce_labels = kwargs.pop("reduce_labels") super().__init__(**kwargs) self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.ignore_index = ignore_index self.do_reduce_labels = do_reduce_labels self.class_info_file = class_info_file self.repo_path = repo_path self.metadata = prepare_metadata(repo_path, class_info_file) self.num_text = num_text def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format=None, **kwargs, ) -> np.ndarray: """ Resize the image to the given size. Size can be min_size (scalar) or `(height, width)` tuple. If size is an int, smaller edge of the image will be matched to this number. """ if "max_size" in kwargs: warnings.warn( "The `max_size` parameter is deprecated and will be removed in v4.27. " "Please specify in `size['longest_edge'] instead`.", FutureWarning, ) max_size = kwargs.pop("max_size") else: max_size = None size = get_size_dict(size, max_size=max_size, default_to_square=False) if "shortest_edge" in size and "longest_edge" in size: size, max_size = size["shortest_edge"], size["longest_edge"] elif "height" in size and "width" in size: size = (size["height"], size["width"]) max_size = None else: raise ValueError( "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got" f" {size.keys()}." ) size = get_oneformer_resize_output_image_size( image=image, size=size, max_size=max_size, default_to_square=False, ) image = resize(image, size=size, resample=resample, data_format=data_format) return image # Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.rescale def rescale( self, image: np.ndarray, rescale_factor: float, data_format: Optional[ChannelDimension] = None ) -> np.ndarray: """ Rescale the image by the given factor. """ return rescale(image, rescale_factor, data_format=data_format) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize def normalize( self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Normalize the image with the given mean and standard deviation. 
""" return normalize(image, mean=mean, std=std, data_format=data_format) # Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.convert_segmentation_map_to_binary_masks def convert_segmentation_map_to_binary_masks( self, segmentation_map: "np.ndarray", instance_id_to_semantic_id: Optional[Dict[int, int]] = None, ignore_index: Optional[int] = None, reduce_labels: bool = False, **kwargs, ): reduce_labels = reduce_labels if reduce_labels is not None else self.reduce_labels ignore_index = ignore_index if ignore_index is not None else self.ignore_index return convert_segmentation_map_to_binary_masks( segmentation_map=segmentation_map, instance_id_to_semantic_id=instance_id_to_semantic_id, ignore_index=ignore_index, reduce_labels=reduce_labels, ) def __call__(self, images, task_inputs=None, segmentation_maps=None, **kwargs) -> BatchFeature: return self.preprocess(images, task_inputs=task_inputs, segmentation_maps=segmentation_maps, **kwargs) def _preprocess( self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, ): if do_resize: image = self.resize(image, size=size, resample=resample) if do_rescale: image = self.rescale(image, rescale_factor=rescale_factor) if do_normalize: image = self.normalize(image, mean=image_mean, std=image_std) return image def _preprocess_image( self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """Preprocesses a single image.""" # All transformations expect numpy arrays. image = to_numpy_array(image) image = self._preprocess( image=image, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, ) if data_format is not None: image = to_channel_dimension_format(image, data_format) return image def _preprocess_mask( self, segmentation_map: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, ) -> np.ndarray: """Preprocesses a single mask.""" segmentation_map = to_numpy_array(segmentation_map) # Add channel dimension if missing - needed for certain transformations added_channel_dim = False if segmentation_map.ndim == 2: added_channel_dim = True segmentation_map = segmentation_map[None, ...] # TODO: (Amy) # Remork segmentation map processing to include reducing labels and resizing which doesn't # drop segment IDs > 255. 
segmentation_map = self._preprocess( image=segmentation_map, do_resize=do_resize, resample=PILImageResampling.NEAREST, size=size, do_rescale=False, do_normalize=False, ) # Remove extra channel dimension if added for processing if added_channel_dim: segmentation_map = segmentation_map.squeeze(0) return segmentation_map def preprocess( self, images: ImageInput, task_inputs: Optional[List[str]] = None, segmentation_maps: Optional[ImageInput] = None, instance_id_to_semantic_id: Optional[Dict[int, int]] = None, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, ignore_index: Optional[int] = None, do_reduce_labels: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs, ) -> BatchFeature: if "pad_and_return_pixel_mask" in kwargs: warnings.warn( "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in v4.27", FutureWarning, ) if "reduce_labels" in kwargs: warnings.warn( "The `reduce_labels` argument is deprecated and will be removed in a v4.27. Please use" " `do_reduce_labels` instead.", FutureWarning, ) if do_reduce_labels is not None: raise ValueError( "You cannot use both `reduce_labels` and `do_reduce_labels` arguments. Please use" " `do_reduce_labels` instead." ) do_reduce_labels = kwargs.pop("reduce_labels") if task_inputs is None: # Default value task_inputs = ["panoptic"] do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False, max_size=self._max_size) resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std ignore_index = ignore_index if ignore_index is not None else self.ignore_index do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels if do_resize is not None and size is None: raise ValueError("If `do_resize` is True, `size` must be provided.") if do_rescale is not None and rescale_factor is None: raise ValueError("If `do_rescale` is True, `rescale_factor` must be provided.") if do_normalize is not None and (image_mean is None or image_std is None): raise ValueError("If `do_normalize` is True, `image_mean` and `image_std` must be provided.") if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if segmentation_maps is not None and not valid_images(segmentation_maps): raise ValueError( "Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." 
) images = make_list_of_images(images) if segmentation_maps is not None: segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2) if segmentation_maps is not None and len(images) != len(segmentation_maps): raise ValueError("Images and segmentation maps must have the same length.") images = [ self._preprocess_image( image, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, ) for image in images ] if segmentation_maps is not None: segmentation_maps = [ self._preprocess_mask(segmentation_map, do_resize, size) for segmentation_map in segmentation_maps ] encoded_inputs = self.encode_inputs( images, task_inputs, segmentation_maps, instance_id_to_semantic_id, ignore_index, do_reduce_labels, return_tensors, ) return encoded_inputs # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image def _pad_image( self, image: np.ndarray, output_size: Tuple[int, int], constant_values: Union[float, Iterable[float]] = 0, data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Pad an image with zeros to the given size. """ input_height, input_width = get_image_size(image) output_height, output_width = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad( image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format ) return padded_image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad def pad( self, images: List[np.ndarray], constant_values: Union[float, Iterable[float]] = 0, return_pixel_mask: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width in the batch and optionally returns their corresponding pixel mask. Args: image (`np.ndarray`): Image to pad. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether to return a pixel mask. input_channel_dimension (`ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be inferred from the input image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. 
""" pad_size = get_max_height_width(images) padded_images = [ self._pad_image(image, pad_size, constant_values=constant_values, data_format=data_format) for image in images ] data = {"pixel_values": padded_images} if return_pixel_mask: masks = [make_pixel_mask(image=image, output_size=pad_size) for image in images] data["pixel_mask"] = masks return BatchFeature(data=data, tensor_type=return_tensors) def get_semantic_annotations(self, label, num_class_obj): annotation_classes = label["classes"] annotation_masks = label["masks"] texts = ["a semantic photo"] * self.num_text classes = [] masks = [] for idx in range(len(annotation_classes)): class_id = annotation_classes[idx] mask = annotation_masks[idx] if not np.all(mask is False): if class_id not in classes: cls_name = self.metadata[str(class_id)] classes.append(class_id) masks.append(mask) num_class_obj[cls_name] += 1 else: idx = classes.index(class_id) masks[idx] += mask masks[idx] = np.clip(masks[idx], 0, 1) num = 0 for i, cls_name in enumerate(self.metadata["class_names"]): if num_class_obj[cls_name] > 0: for _ in range(num_class_obj[cls_name]): if num >= len(texts): break texts[num] = f"a photo with a {cls_name}" num += 1 classes = np.array(classes) masks = np.array(masks) return classes, masks, texts def get_instance_annotations(self, label, num_class_obj): annotation_classes = label["classes"] annotation_masks = label["masks"] texts = ["an instance photo"] * self.num_text classes = [] masks = [] for idx in range(len(annotation_classes)): class_id = annotation_classes[idx] mask = annotation_masks[idx] if class_id in self.metadata["thing_ids"]: if not np.all(mask is False): cls_name = self.metadata[str(class_id)] classes.append(class_id) masks.append(mask) num_class_obj[cls_name] += 1 num = 0 for i, cls_name in enumerate(self.metadata["class_names"]): if num_class_obj[cls_name] > 0: for _ in range(num_class_obj[cls_name]): if num >= len(texts): break texts[num] = f"a photo with a {cls_name}" num += 1 classes = np.array(classes) masks = np.array(masks) return classes, masks, texts def get_panoptic_annotations(self, label, num_class_obj): annotation_classes = label["classes"] annotation_masks = label["masks"] texts = ["an panoptic photo"] * self.num_text classes = [] masks = [] for idx in range(len(annotation_classes)): class_id = annotation_classes[idx] mask = annotation_masks[idx].data if not np.all(mask is False): cls_name = self.metadata[str(class_id)] classes.append(class_id) masks.append(mask) num_class_obj[cls_name] += 1 num = 0 for i, cls_name in enumerate(self.metadata["class_names"]): if num_class_obj[cls_name] > 0: for _ in range(num_class_obj[cls_name]): if num >= len(texts): break texts[num] = f"a photo with a {cls_name}" num += 1 classes = np.array(classes) masks = np.array(masks) return classes, masks, texts def encode_inputs( self, pixel_values_list: List[ImageInput], task_inputs: List[str], segmentation_maps: ImageInput = None, instance_id_to_semantic_id: Optional[Union[List[Dict[int, int]], Dict[int, int]]] = None, ignore_index: Optional[int] = None, reduce_labels: bool = False, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ): """ Pad images up to the largest image in a batch and create a corresponding `pixel_mask`. OneFormer addresses semantic segmentation with a mask classification paradigm, thus input segmentation maps will be converted to lists of binary masks and their respective labels. 
Let's see an example, assuming `segmentation_maps = [[2,6,7,9]]`, the output will contain `mask_labels = [[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]` (four binary masks) and `class_labels = [2,6,7,9]`, the labels for each mask. Args: pixel_values_list (`List[ImageInput]`): List of images (pixel values) to be padded. Each image should be a tensor of shape `(channels, height, width)`. task_inputs (`List[str]`): List of task values. segmentation_maps (`ImageInput`, *optional*): The corresponding semantic segmentation maps with the pixel-wise annotations. (`bool`, *optional*, defaults to `True`): Whether or not to pad images up to the largest image in a batch and create a pixel mask. If left to the default, will return a pixel mask that is: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). instance_id_to_semantic_id (`List[Dict[int, int]]` or `Dict[int, int]`, *optional*): A mapping between object instance ids and class ids. If passed, `segmentation_maps` is treated as an instance segmentation map where each pixel represents an instance id. Can be provided as a single dictionary with a global/dataset-level mapping or as a list of dictionaries (one per image), to map instance ids in each image separately. return_tensors (`str` or [`~file_utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **pixel_values** -- Pixel values to be fed to a model. - **pixel_mask** -- Pixel mask to be fed to a model (when `=True` or if `pixel_mask` is in `self.model_input_names`). - **mask_labels** -- Optional list of mask labels of shape `(labels, height, width)` to be fed to a model (when `annotations` are provided). - **class_labels** -- Optional list of class labels of shape `(labels)` to be fed to a model (when `annotations` are provided). They identify the labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` if `class_labels[i][j]`. - **text_inputs** -- Optional list of text string entries to be fed to a model (when `annotations` are provided). They identify the binary masks present in the image. 
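        A tiny sketch of the underlying conversion (here `image_processor` is assumed to be an instantiated
        `OneFormerImageProcessor` and the map values are illustrative):

        ```python
        >>> import numpy as np
        >>> segmentation_map = np.array([[2, 2], [7, 9]])
        >>> masks, classes = image_processor.convert_segmentation_map_to_binary_masks(segmentation_map)
        >>> # masks has shape (3, 2, 2), one binary mask per id; classes is [2, 7, 9]
        ```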
""" if "pad_and_return_pixel_mask" in kwargs: warnings.warn( "The `pad_and_return_pixel_mask` argument has no effect and will be removed in v4.27", FutureWarning ) ignore_index = self.ignore_index if ignore_index is None else ignore_index reduce_labels = self.do_reduce_labels if reduce_labels is None else reduce_labels pixel_values_list = [to_numpy_array(pixel_values) for pixel_values in pixel_values_list] pad_size = get_max_height_width(pixel_values_list) encoded_inputs = self.pad(pixel_values_list, return_tensors=return_tensors) annotations = None if segmentation_maps is not None: segmentation_maps = map(np.array, segmentation_maps) annotations = [] for idx, segmentation_map in enumerate(segmentation_maps): # Use instance2class_id mapping per image if isinstance(instance_id_to_semantic_id, list): instance_id = instance_id_to_semantic_id[idx] else: instance_id = instance_id_to_semantic_id # Use instance2class_id mapping per image masks, classes = self.convert_segmentation_map_to_binary_masks( segmentation_map, instance_id, ignore_index=ignore_index, reduce_labels=reduce_labels ) annotations.append({"masks": masks, "classes": classes}) if annotations is not None: mask_labels = [] class_labels = [] text_inputs = [] num_class_obj = {} for cls_name in self.metadata["class_names"]: num_class_obj[cls_name] = 0 for i, label in enumerate(annotations): task = task_inputs[i] if task == "semantic": classes, masks, texts = self.get_semantic_annotations(label, num_class_obj) elif task == "instance": classes, masks, texts = self.get_instance_annotations(label, num_class_obj) elif task == "panoptic": classes, masks, texts = self.get_panoptic_annotations(label, num_class_obj) else: raise ValueError(f"{task} was not expected, expected `semantic`, `instance` or `panoptic`") # we cannot batch them since they don't share a common class size masks = [mask[None, ...] for mask in masks] masks = [ self._pad_image(image=mask, output_size=pad_size, constant_values=ignore_index) for mask in masks ] masks = np.concatenate(masks, axis=0) mask_labels.append(torch.from_numpy(masks)) class_labels.append(torch.from_numpy(classes).long()) text_inputs.append(texts) encoded_inputs["mask_labels"] = mask_labels encoded_inputs["class_labels"] = class_labels encoded_inputs["text_inputs"] = text_inputs # This needs to be tokenized before sending to the model. encoded_inputs["task_inputs"] = [f"the task is {task_input}" for task_input in task_inputs] return encoded_inputs # Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.post_process_semantic_segmentation def post_process_semantic_segmentation( self, outputs, target_sizes: Optional[List[Tuple[int, int]]] = None ) -> "torch.Tensor": """ Converts the output of [`MaskFormerForInstanceSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`MaskFormerForInstanceSegmentation`]): Raw outputs of the model. target_sizes (`List[Tuple[int, int]]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If left to None, predictions will not be resized. Returns: `List[torch.Tensor]`: A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id. 
""" class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] # Remove the null class `[..., :-1]` masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Semantic segmentation logits of shape (batch_size, num_classes, height, width) segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) batch_size = class_queries_logits.shape[0] # Resize logits and compute semantic segmentation maps if target_sizes is not None: if batch_size != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) semantic_segmentation = [] for idx in range(batch_size): resized_logits = torch.nn.functional.interpolate( segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False ) semantic_map = resized_logits[0].argmax(dim=0) semantic_segmentation.append(semantic_map) else: semantic_segmentation = segmentation.argmax(dim=1) semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation def post_process_instance_segmentation( self, outputs, task_type: str = "instance", is_demo: bool = True, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, target_sizes: Optional[List[Tuple[int, int]]] = None, return_coco_annotation: Optional[bool] = False, ): """ Converts the output of [`OneFormerForUniversalSegmentationOutput`] into image instance segmentation predictions. Only supports PyTorch. Args: outputs ([`OneFormerForUniversalSegmentationOutput`]): The outputs from [`OneFormerForUniversalSegmentationOutput`]. task_type (`str`, *optional)*, defaults to "instance"): The post processing depends on the task token input. If the `task_type` is "panoptic", we need to ignore the stuff predictions. is_demo (`bool`, *optional)*, defaults to `True`): Whether the model is in demo mode. If true, use threshold to predict final masks. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction in batch. If left to None, predictions will not be resized. return_coco_annotation (`bool`, *optional)*, defaults to `False`): Whether to return predictions in COCO format. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set to `None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized to the corresponding `target_sizes` entry. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- an integer representing the `segment_id`. 
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. Multiple instances of the same class / label were fused and assigned a single `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] batch_size = class_queries_logits.shape[0] num_queries = class_queries_logits.shape[1] num_classes = class_queries_logits.shape[-1] - 1 # Loop over items in batch size results: List[Dict[str, torch.Tensor]] = [] for i in range(batch_size): # [Q, K] scores = torch.nn.functional.softmax(class_queries_logits[i], dim=-1)[:, :-1] labels = torch.arange(num_classes).unsqueeze(0).repeat(num_queries, 1).flatten(0, 1) # scores_per_image, topk_indices = scores.flatten(0, 1).topk(self.num_queries, sorted=False) scores_per_image, topk_indices = scores.flatten(0, 1).topk(num_queries, sorted=False) labels_per_image = labels[topk_indices] topk_indices = topk_indices // num_classes # mask_pred = mask_pred.unsqueeze(1).repeat(1, self.sem_seg_head.num_classes, 1).flatten(0, 1) mask_pred = masks_queries_logits[i][topk_indices] # Only consider scores with confidence over [threshold] for demo if is_demo: keep = scores_per_image > threshold scores_per_image = scores_per_image[keep] labels_per_image = labels_per_image[keep] mask_pred = mask_pred[keep] # if this is panoptic segmentation, we only keep the "thing" classes if task_type == "panoptic": keep = torch.zeros_like(scores_per_image).bool() for i, lab in enumerate(labels_per_image): keep[i] = lab in self.metadata["thing_ids"] scores_per_image = scores_per_image[keep] labels_per_image = labels_per_image[keep] mask_pred = mask_pred[keep] if mask_pred.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_pred.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue if "ade20k" in self.class_info_file and not is_demo and "instance" in task_type: for i in range(labels_per_image.shape[0]): labels_per_image[i] = self.metadata["thing_ids"].index(labels_per_image[i].item()) # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_pred, scores_per_image, labels_per_image, mask_threshold, overlap_mask_area_threshold, set(), target_size, ) # Return segmentation map in run-length encoding (RLE) format if return_coco_annotation: segmentation = convert_segmentation_to_rle(segmentation) results.append({"segmentation": segmentation, "segments_info": segments}) return results # Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.post_process_panoptic_segmentation def post_process_panoptic_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_sizes: Optional[List[Tuple[int, int]]] = None, ) -> List[Dict]: """ Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into image panoptic segmentation predictions. Only supports PyTorch. 
Args: outputs ([`MaskFormerForInstanceSegmentationOutput`]): The outputs from [`MaskFormerForInstanceSegmentation`]. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. label_ids_to_fuse (`Set[int]`, *optional*): The labels in this state will have all their instances be fused together. For instance we could say there can only be one sky in an image, but several persons, so the label ID for sky would be in that set, but not the one for person. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction in batch. If left to None, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set to `None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized to the corresponding `target_sizes` entry. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- an integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. Multiple instances of the same class / label were fused and assigned a single `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ if label_ids_to_fuse is None: logger.warning("`label_ids_to_fuse` unset. 
No instance will be fused.") label_ids_to_fuse = set() class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) # No mask found if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=label_ids_to_fuse, target_size=target_size, ) results.append({"segmentation": segmentation, "segments_info": segments}) return results
# coding=utf-8 # Copyright 2022 SHI Labs and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for OneFormer.""" import json import warnings from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union import numpy as np from huggingface_hub import hf_hub_download from transformers.image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from transformers.image_transforms import ( PaddingMode, get_resize_output_image_size, normalize, pad, rescale, resize, to_channel_dimension_format, to_numpy_array, ) from transformers.image_utils import ( ChannelDimension, ImageInput, PILImageResampling, get_image_size, infer_channel_dimension_format, make_list_of_images, valid_images, ) from transformers.utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, TensorType, is_torch_available, is_torch_tensor, logging, ) logger = logging.get_logger(__name__) if is_torch_available(): import torch from torch import nn from ...pytorch_utils import torch_int_div # Copied from transformers.models.detr.image_processing_detr.max_across_indices def max_across_indices(values: Iterable[Any]) -> List[Any]: """ Return the maximum value across all indices of an iterable of values. """ return [max(values_i) for values_i in zip(*values)] # Copied from transformers.models.detr.image_processing_detr.get_max_height_width def get_max_height_width(images: List[np.ndarray]) -> List[int]: """ Get the maximum height and width across all images in a batch. """ input_channel_dimension = infer_channel_dimension_format(images[0]) if input_channel_dimension == ChannelDimension.FIRST: _, max_height, max_width = max_across_indices([img.shape for img in images]) elif input_channel_dimension == ChannelDimension.LAST: max_height, max_width, _ = max_across_indices([img.shape for img in images]) else: raise ValueError(f"Invalid channel dimension format: {input_channel_dimension}") return (max_height, max_width) # Copied from transformers.models.detr.image_processing_detr.make_pixel_mask def make_pixel_mask(image: np.ndarray, output_size: Tuple[int, int]) -> np.ndarray: """ Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding. Args: image (`np.ndarray`): Image to make the pixel mask for. output_size (`Tuple[int, int]`): Output size of the mask. """ input_height, input_width = get_image_size(image) mask = np.zeros(output_size, dtype=np.int64) mask[:input_height, :input_width] = 1 return mask # Copied from transformers.models.detr.image_processing_detr.binary_mask_to_rle def binary_mask_to_rle(mask): """ Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format. Args: mask (`torch.Tensor` or `numpy.array`): A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target segment_id or class_id. Returns: `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE format. 
""" if is_torch_tensor(mask): mask = mask.numpy() pixels = mask.flatten() pixels = np.concatenate([[0], pixels, [0]]) runs = np.where(pixels[1:] != pixels[:-1])[0] + 1 runs[1::2] -= runs[::2] return list(runs) # Copied from transformers.models.detr.image_processing_detr.convert_segmentation_to_rle def convert_segmentation_to_rle(segmentation): """ Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format. Args: segmentation (`torch.Tensor` or `numpy.array`): A segmentation map of shape `(height, width)` where each value denotes a segment or class id. Returns: `List[List]`: A list of lists, where each list is the run-length encoding of a segment / class id. """ segment_ids = torch.unique(segmentation) run_length_encodings = [] for idx in segment_ids: mask = torch.where(segmentation == idx, 1, 0) rle = binary_mask_to_rle(mask) run_length_encodings.append(rle) return run_length_encodings # Copied from transformers.models.detr.image_processing_detr.remove_low_and_no_objects def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels): """ Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and `labels`. Args: masks (`torch.Tensor`): A tensor of shape `(num_queries, height, width)`. scores (`torch.Tensor`): A tensor of shape `(num_queries)`. labels (`torch.Tensor`): A tensor of shape `(num_queries)`. object_mask_threshold (`float`): A number between 0 and 1 used to binarize the masks. Raises: `ValueError`: Raised when the first dimension doesn't match in all input tensors. Returns: `Tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region < `object_mask_threshold`. """ if not (masks.shape[0] == scores.shape[0] == labels.shape[0]): raise ValueError("mask, scores and labels must have the same shape!") to_keep = labels.ne(num_labels) & (scores > object_mask_threshold) return masks[to_keep], scores[to_keep], labels[to_keep] # Copied from transformers.models.detr.image_processing_detr.check_segment_validity def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8): # Get the mask associated with the k class mask_k = mask_labels == k mask_k_area = mask_k.sum() # Compute the area of all the stuff in query k original_area = (mask_probs[k] >= mask_threshold).sum() mask_exists = mask_k_area > 0 and original_area > 0 # Eliminate disconnected tiny segments if mask_exists: area_ratio = mask_k_area / original_area if not area_ratio.item() > overlap_mask_area_threshold: mask_exists = False return mask_exists, mask_k # Copied from transformers.models.detr.image_processing_detr.compute_segments def compute_segments( mask_probs, pred_scores, pred_labels, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_size: Tuple[int, int] = None, ): height = mask_probs.shape[1] if target_size is None else target_size[0] width = mask_probs.shape[2] if target_size is None else target_size[1] segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device) segments: List[Dict] = [] if target_size is not None: mask_probs = nn.functional.interpolate( mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False )[0] current_segment_id = 0 # Weigh each mask by its prediction score mask_probs *= pred_scores.view(-1, 1, 1) mask_labels = mask_probs.argmax(0) # [height, width] # Keep track of 
instances of each class stuff_memory_list: Dict[str, int] = {} for k in range(pred_labels.shape[0]): pred_class = pred_labels[k].item() should_fuse = pred_class in label_ids_to_fuse # Check if mask exists and large enough to be a segment mask_exists, mask_k = check_segment_validity( mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold ) if mask_exists: if pred_class in stuff_memory_list: current_segment_id = stuff_memory_list[pred_class] else: current_segment_id += 1 # Add current object segment to final segmentation map segmentation[mask_k] = current_segment_id segment_score = round(pred_scores[k].item(), 6) segments.append( { "id": current_segment_id, "label_id": pred_class, "was_fused": should_fuse, "score": segment_score, } ) if should_fuse: stuff_memory_list[pred_class] = current_segment_id return segmentation, segments # Copied from transformers.models.maskformer.image_processing_maskformer.convert_segmentation_map_to_binary_masks def convert_segmentation_map_to_binary_masks( segmentation_map: "np.ndarray", instance_id_to_semantic_id: Optional[Dict[int, int]] = None, ignore_index: Optional[int] = None, reduce_labels: bool = False, ): if reduce_labels and ignore_index is None: raise ValueError("If `reduce_labels` is True, `ignore_index` must be provided.") if reduce_labels: segmentation_map = np.where(segmentation_map == 0, ignore_index, segmentation_map - 1) # Get unique ids (class or instance ids based on input) all_labels = np.unique(segmentation_map) # Drop background label if applicable if ignore_index is not None: all_labels = all_labels[all_labels != ignore_index] # Generate a binary mask for each object instance binary_masks = [(segmentation_map == i) for i in all_labels] binary_masks = np.stack(binary_masks, axis=0) # (num_labels, height, width) # Convert instance ids to class ids if instance_id_to_semantic_id is not None: labels = np.zeros(all_labels.shape[0]) for label in all_labels: class_id = instance_id_to_semantic_id[label + 1 if reduce_labels else label] labels[all_labels == label] = class_id - 1 if reduce_labels else class_id else: labels = all_labels return binary_masks.astype(np.float32), labels.astype(np.int64) def get_oneformer_resize_output_image_size( image: np.ndarray, size: Union[int, Tuple[int, int], List[int], Tuple[int]], max_size: Optional[int] = None, default_to_square: bool = True, ) -> tuple: """ Computes the output size given the desired size. Args: input_image (`np.ndarray`): The input image. size (`int`, `Tuple[int, int]`, `List[int]`, `Tuple[int]`): The size of the output image. default_to_square (`bool`, *optional*, defaults to `True`): Whether to default to square if no size is provided. max_size (`int`, *optional*): The maximum size of the output image. Returns: `Tuple[int, int]`: The output size. """ output_size = get_resize_output_image_size( input_image=image, size=size, default_to_square=default_to_square, max_size=max_size ) return output_size def prepare_metadata(repo_path, class_info_file): with open(hf_hub_download(repo_path, class_info_file, repo_type="dataset"), "r") as f: class_info = json.load(f) metadata = {} class_names = [] thing_ids = [] for key, info in class_info.items(): metadata[key] = info["name"] class_names.append(info["name"]) if info["isthing"]: thing_ids.append(int(key)) metadata["thing_ids"] = thing_ids metadata["class_names"] = class_names return metadata class OneFormerImageProcessor(BaseImageProcessor): r""" Constructs a OneFormer image processor. 
The image processor can be used to prepare image(s), task input(s) and optional text inputs and targets for the model. This image processor inherits from [`BaseImageProcessor`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the input to a certain `size`. size (`int`, *optional*, defaults to 800): Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of the image will be matched to this number. i.e, if `height > width`, then image will be rescaled to `(size * height / width, size)`. max_size (`int`, *optional*, defaults to 1333): The largest size an image dimension can have (otherwise it's capped). Only has an effect if `do_resize` is set to `True`. resample (`int`, *optional*, defaults to `PIL.Image.Resampling.BILINEAR`): An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`, `PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`, `PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set to `True`. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the input to a certain `scale`. rescale_factor (`float`, *optional*, defaults to 1/ 255): Rescale the input by the given factor. Only has an effect if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `True`): Whether or not to normalize the input with mean and standard deviation. image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`): The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean. image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`): The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the ImageNet std. ignore_index (`int`, *optional*): Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels denoted with 0 (background) will be replaced with `ignore_index`. do_reduce_labels (`bool`, *optional*, defaults to `False`): Whether or not to decrement all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background, and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by `ignore_index`. repo_path (`str`, defaults to `shi-labs/oneformer_demo`): Dataset repository on huggingface hub containing the JSON file with class information for the dataset. class_info_file (`str`): JSON file containing class information for the dataset. It is stored inside on the `repo_path` dataset repository. num_text (`int`, *optional*): Number of text entries in the text input list. 
""" model_input_names = ["pixel_values", "pixel_mask", "task_inputs"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BILINEAR, do_rescale: bool = True, rescale_factor: float = 1 / 255, do_normalize: bool = True, image_mean: Union[float, List[float]] = None, image_std: Union[float, List[float]] = None, ignore_index: Optional[int] = None, do_reduce_labels: bool = False, repo_path: str = "shi-labs/oneformer_demo", class_info_file: str = None, num_text: Optional[int] = None, **kwargs, ): if "max_size" in kwargs: self._max_size = kwargs.pop("max_size") else: self._max_size = 1333 size = size if size is not None else {"shortest_edge": 800, "longest_edge": self._max_size} size = get_size_dict(size, max_size=self._max_size, default_to_square=False) if "reduce_labels" in kwargs: warnings.warn( "The `reduce_labels` argument is deprecated and will be removed in v4.27. " "Please use `do_reduce_labels` instead.", FutureWarning, ) do_reduce_labels = kwargs.pop("reduce_labels") super().__init__(**kwargs) self.do_resize = do_resize self.size = size self.resample = resample self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD self.ignore_index = ignore_index self.do_reduce_labels = do_reduce_labels self.class_info_file = class_info_file self.repo_path = repo_path self.metadata = prepare_metadata(repo_path, class_info_file) self.num_text = num_text def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BILINEAR, data_format=None, **kwargs, ) -> np.ndarray: """ Resize the image to the given size. Size can be min_size (scalar) or `(height, width)` tuple. If size is an int, smaller edge of the image will be matched to this number. """ if "max_size" in kwargs: warnings.warn( "The `max_size` parameter is deprecated and will be removed in v4.27. " "Please specify in `size['longest_edge'] instead`.", FutureWarning, ) max_size = kwargs.pop("max_size") else: max_size = None size = get_size_dict(size, max_size=max_size, default_to_square=False) if "shortest_edge" in size and "longest_edge" in size: size, max_size = size["shortest_edge"], size["longest_edge"] elif "height" in size and "width" in size: size = (size["height"], size["width"]) max_size = None else: raise ValueError( "Size must contain 'height' and 'width' keys or 'shortest_edge' and 'longest_edge' keys. Got" f" {size.keys()}." ) size = get_oneformer_resize_output_image_size( image=image, size=size, max_size=max_size, default_to_square=False, ) image = resize(image, size=size, resample=resample, data_format=data_format) return image # Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.rescale def rescale( self, image: np.ndarray, rescale_factor: float, data_format: Optional[ChannelDimension] = None ) -> np.ndarray: """ Rescale the image by the given factor. """ return rescale(image, rescale_factor, data_format=data_format) # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.normalize def normalize( self, image: np.ndarray, mean: Union[float, Iterable[float]], std: Union[float, Iterable[float]], data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Normalize the image with the given mean and standard deviation. 
""" return normalize(image, mean=mean, std=std, data_format=data_format) # Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.convert_segmentation_map_to_binary_masks def convert_segmentation_map_to_binary_masks( self, segmentation_map: "np.ndarray", instance_id_to_semantic_id: Optional[Dict[int, int]] = None, ignore_index: Optional[int] = None, reduce_labels: bool = False, **kwargs, ): reduce_labels = reduce_labels if reduce_labels is not None else self.reduce_labels ignore_index = ignore_index if ignore_index is not None else self.ignore_index return convert_segmentation_map_to_binary_masks( segmentation_map=segmentation_map, instance_id_to_semantic_id=instance_id_to_semantic_id, ignore_index=ignore_index, reduce_labels=reduce_labels, ) def __call__(self, images, task_inputs=None, segmentation_maps=None, **kwargs) -> BatchFeature: return self.preprocess(images, task_inputs=task_inputs, segmentation_maps=segmentation_maps, **kwargs) def _preprocess( self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, ): if do_resize: image = self.resize(image, size=size, resample=resample) if do_rescale: image = self.rescale(image, rescale_factor=rescale_factor) if do_normalize: image = self.normalize(image, mean=image_mean, std=image_std) return image def _preprocess_image( self, image: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, data_format: Optional[Union[str, ChannelDimension]] = None, ) -> np.ndarray: """Preprocesses a single image.""" # All transformations expect numpy arrays. image = to_numpy_array(image) image = self._preprocess( image=image, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, ) if data_format is not None: image = to_channel_dimension_format(image, data_format) return image def _preprocess_mask( self, segmentation_map: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, ) -> np.ndarray: """Preprocesses a single mask.""" segmentation_map = to_numpy_array(segmentation_map) # Add channel dimension if missing - needed for certain transformations added_channel_dim = False if segmentation_map.ndim == 2: added_channel_dim = True segmentation_map = segmentation_map[None, ...] # TODO: (Amy) # Remork segmentation map processing to include reducing labels and resizing which doesn't # drop segment IDs > 255. 
segmentation_map = self._preprocess( image=segmentation_map, do_resize=do_resize, resample=PILImageResampling.NEAREST, size=size, do_rescale=False, do_normalize=False, ) # Remove extra channel dimension if added for processing if added_channel_dim: segmentation_map = segmentation_map.squeeze(0) return segmentation_map def preprocess( self, images: ImageInput, task_inputs: Optional[List[str]] = None, segmentation_maps: Optional[ImageInput] = None, instance_id_to_semantic_id: Optional[Dict[int, int]] = None, do_resize: Optional[bool] = None, size: Optional[Dict[str, int]] = None, resample: PILImageResampling = None, do_rescale: Optional[bool] = None, rescale_factor: Optional[float] = None, do_normalize: Optional[bool] = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, ignore_index: Optional[int] = None, do_reduce_labels: Optional[bool] = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST, **kwargs, ) -> BatchFeature: if "pad_and_return_pixel_mask" in kwargs: warnings.warn( "The `pad_and_return_pixel_mask` argument is deprecated and will be removed in v4.27", FutureWarning, ) if "reduce_labels" in kwargs: warnings.warn( "The `reduce_labels` argument is deprecated and will be removed in a v4.27. Please use" " `do_reduce_labels` instead.", FutureWarning, ) if do_reduce_labels is not None: raise ValueError( "You cannot use both `reduce_labels` and `do_reduce_labels` arguments. Please use" " `do_reduce_labels` instead." ) do_reduce_labels = kwargs.pop("reduce_labels") if task_inputs is None: # Default value task_inputs = ["panoptic"] do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False, max_size=self._max_size) resample = resample if resample is not None else self.resample do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std ignore_index = ignore_index if ignore_index is not None else self.ignore_index do_reduce_labels = do_reduce_labels if do_reduce_labels is not None else self.do_reduce_labels if do_resize is not None and size is None: raise ValueError("If `do_resize` is True, `size` must be provided.") if do_rescale is not None and rescale_factor is None: raise ValueError("If `do_rescale` is True, `rescale_factor` must be provided.") if do_normalize is not None and (image_mean is None or image_std is None): raise ValueError("If `do_normalize` is True, `image_mean` and `image_std` must be provided.") if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if segmentation_maps is not None and not valid_images(segmentation_maps): raise ValueError( "Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." 
) images = make_list_of_images(images) if segmentation_maps is not None: segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2) if segmentation_maps is not None and len(images) != len(segmentation_maps): raise ValueError("Images and segmentation maps must have the same length.") images = [ self._preprocess_image( image, do_resize=do_resize, size=size, resample=resample, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, ) for image in images ] if segmentation_maps is not None: segmentation_maps = [ self._preprocess_mask(segmentation_map, do_resize, size) for segmentation_map in segmentation_maps ] encoded_inputs = self.encode_inputs( images, task_inputs, segmentation_maps, instance_id_to_semantic_id, ignore_index, do_reduce_labels, return_tensors, ) return encoded_inputs # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor._pad_image def _pad_image( self, image: np.ndarray, output_size: Tuple[int, int], constant_values: Union[float, Iterable[float]] = 0, data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Pad an image with zeros to the given size. """ input_height, input_width = get_image_size(image) output_height, output_width = output_size pad_bottom = output_height - input_height pad_right = output_width - input_width padding = ((0, pad_bottom), (0, pad_right)) padded_image = pad( image, padding, mode=PaddingMode.CONSTANT, constant_values=constant_values, data_format=data_format ) return padded_image # Copied from transformers.models.detr.image_processing_detr.DetrImageProcessor.pad def pad( self, images: List[np.ndarray], constant_values: Union[float, Iterable[float]] = 0, return_pixel_mask: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = None, ) -> np.ndarray: """ Pads a batch of images to the bottom and right of the image with zeros to the size of largest height and width in the batch and optionally returns their corresponding pixel mask. Args: image (`np.ndarray`): Image to pad. constant_values (`float` or `Iterable[float]`, *optional*): The value to use for the padding if `mode` is `"constant"`. return_pixel_mask (`bool`, *optional*, defaults to `True`): Whether to return a pixel mask. input_channel_dimension (`ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be inferred from the input image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. 
""" pad_size = get_max_height_width(images) padded_images = [ self._pad_image(image, pad_size, constant_values=constant_values, data_format=data_format) for image in images ] data = {"pixel_values": padded_images} if return_pixel_mask: masks = [make_pixel_mask(image=image, output_size=pad_size) for image in images] data["pixel_mask"] = masks return BatchFeature(data=data, tensor_type=return_tensors) def get_semantic_annotations(self, label, num_class_obj): annotation_classes = label["classes"] annotation_masks = label["masks"] texts = ["a semantic photo"] * self.num_text classes = [] masks = [] for idx in range(len(annotation_classes)): class_id = annotation_classes[idx] mask = annotation_masks[idx] if not np.all(mask is False): if class_id not in classes: cls_name = self.metadata[str(class_id)] classes.append(class_id) masks.append(mask) num_class_obj[cls_name] += 1 else: idx = classes.index(class_id) masks[idx] += mask masks[idx] = np.clip(masks[idx], 0, 1) num = 0 for i, cls_name in enumerate(self.metadata["class_names"]): if num_class_obj[cls_name] > 0: for _ in range(num_class_obj[cls_name]): if num >= len(texts): break texts[num] = f"a photo with a {cls_name}" num += 1 classes = np.array(classes) masks = np.array(masks) return classes, masks, texts def get_instance_annotations(self, label, num_class_obj): annotation_classes = label["classes"] annotation_masks = label["masks"] texts = ["an instance photo"] * self.num_text classes = [] masks = [] for idx in range(len(annotation_classes)): class_id = annotation_classes[idx] mask = annotation_masks[idx] if class_id in self.metadata["thing_ids"]: if not np.all(mask is False): cls_name = self.metadata[str(class_id)] classes.append(class_id) masks.append(mask) num_class_obj[cls_name] += 1 num = 0 for i, cls_name in enumerate(self.metadata["class_names"]): if num_class_obj[cls_name] > 0: for _ in range(num_class_obj[cls_name]): if num >= len(texts): break texts[num] = f"a photo with a {cls_name}" num += 1 classes = np.array(classes) masks = np.array(masks) return classes, masks, texts def get_panoptic_annotations(self, label, num_class_obj): annotation_classes = label["classes"] annotation_masks = label["masks"] texts = ["an panoptic photo"] * self.num_text classes = [] masks = [] for idx in range(len(annotation_classes)): class_id = annotation_classes[idx] mask = annotation_masks[idx].data if not np.all(mask is False): cls_name = self.metadata[str(class_id)] classes.append(class_id) masks.append(mask) num_class_obj[cls_name] += 1 num = 0 for i, cls_name in enumerate(self.metadata["class_names"]): if num_class_obj[cls_name] > 0: for _ in range(num_class_obj[cls_name]): if num >= len(texts): break texts[num] = f"a photo with a {cls_name}" num += 1 classes = np.array(classes) masks = np.array(masks) return classes, masks, texts def encode_inputs( self, pixel_values_list: List[ImageInput], task_inputs: List[str], segmentation_maps: ImageInput = None, instance_id_to_semantic_id: Optional[Union[List[Dict[int, int]], Dict[int, int]]] = None, ignore_index: Optional[int] = None, reduce_labels: bool = False, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ): """ Pad images up to the largest image in a batch and create a corresponding `pixel_mask`. OneFormer addresses semantic segmentation with a mask classification paradigm, thus input segmentation maps will be converted to lists of binary masks and their respective labels. 
Let's see an example, assuming `segmentation_maps = [[2,6,7,9]]`, the output will contain `mask_labels = [[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]` (four binary masks) and `class_labels = [2,6,7,9]`, the labels for each mask. Args: pixel_values_list (`List[ImageInput]`): List of images (pixel values) to be padded. Each image should be a tensor of shape `(channels, height, width)`. task_inputs (`List[str]`): List of task values. segmentation_maps (`ImageInput`, *optional*): The corresponding semantic segmentation maps with the pixel-wise annotations. (`bool`, *optional*, defaults to `True`): Whether or not to pad images up to the largest image in a batch and create a pixel mask. If left to the default, will return a pixel mask that is: - 1 for pixels that are real (i.e. **not masked**), - 0 for pixels that are padding (i.e. **masked**). instance_id_to_semantic_id (`List[Dict[int, int]]` or `Dict[int, int]`, *optional*): A mapping between object instance ids and class ids. If passed, `segmentation_maps` is treated as an instance segmentation map where each pixel represents an instance id. Can be provided as a single dictionary with a global/dataset-level mapping or as a list of dictionaries (one per image), to map instance ids in each image separately. return_tensors (`str` or [`~file_utils.TensorType`], *optional*): If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor` objects. Returns: [`BatchFeature`]: A [`BatchFeature`] with the following fields: - **pixel_values** -- Pixel values to be fed to a model. - **pixel_mask** -- Pixel mask to be fed to a model (when `=True` or if `pixel_mask` is in `self.model_input_names`). - **mask_labels** -- Optional list of mask labels of shape `(labels, height, width)` to be fed to a model (when `annotations` are provided). - **class_labels** -- Optional list of class labels of shape `(labels)` to be fed to a model (when `annotations` are provided). They identify the labels of `mask_labels`, e.g. the label of `mask_labels[i][j]` if `class_labels[i][j]`. - **text_inputs** -- Optional list of text string entries to be fed to a model (when `annotations` are provided). They identify the binary masks present in the image. 
""" if "pad_and_return_pixel_mask" in kwargs: warnings.warn( "The `pad_and_return_pixel_mask` argument has no effect and will be removed in v4.27", FutureWarning ) ignore_index = self.ignore_index if ignore_index is None else ignore_index reduce_labels = self.do_reduce_labels if reduce_labels is None else reduce_labels pixel_values_list = [to_numpy_array(pixel_values) for pixel_values in pixel_values_list] pad_size = get_max_height_width(pixel_values_list) encoded_inputs = self.pad(pixel_values_list, return_tensors=return_tensors) annotations = None if segmentation_maps is not None: segmentation_maps = map(np.array, segmentation_maps) annotations = [] for idx, segmentation_map in enumerate(segmentation_maps): # Use instance2class_id mapping per image if isinstance(instance_id_to_semantic_id, list): instance_id = instance_id_to_semantic_id[idx] else: instance_id = instance_id_to_semantic_id # Use instance2class_id mapping per image masks, classes = self.convert_segmentation_map_to_binary_masks( segmentation_map, instance_id, ignore_index=ignore_index, reduce_labels=reduce_labels ) annotations.append({"masks": masks, "classes": classes}) if annotations is not None: mask_labels = [] class_labels = [] text_inputs = [] num_class_obj = {} for cls_name in self.metadata["class_names"]: num_class_obj[cls_name] = 0 for i, label in enumerate(annotations): task = task_inputs[i] if task == "semantic": classes, masks, texts = self.get_semantic_annotations(label, num_class_obj) elif task == "instance": classes, masks, texts = self.get_instance_annotations(label, num_class_obj) elif task == "panoptic": classes, masks, texts = self.get_panoptic_annotations(label, num_class_obj) else: raise ValueError(f"{task} was not expected, expected `semantic`, `instance` or `panoptic`") # we cannot batch them since they don't share a common class size masks = [mask[None, ...] for mask in masks] masks = [ self._pad_image(image=mask, output_size=pad_size, constant_values=ignore_index) for mask in masks ] masks = np.concatenate(masks, axis=0) mask_labels.append(torch.from_numpy(masks)) class_labels.append(torch.from_numpy(classes).long()) text_inputs.append(texts) encoded_inputs["mask_labels"] = mask_labels encoded_inputs["class_labels"] = class_labels encoded_inputs["text_inputs"] = text_inputs # This needs to be tokenized before sending to the model. encoded_inputs["task_inputs"] = [f"the task is {task_input}" for task_input in task_inputs] return encoded_inputs # Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.post_process_semantic_segmentation def post_process_semantic_segmentation( self, outputs, target_sizes: Optional[List[Tuple[int, int]]] = None ) -> "torch.Tensor": """ Converts the output of [`MaskFormerForInstanceSegmentation`] into semantic segmentation maps. Only supports PyTorch. Args: outputs ([`MaskFormerForInstanceSegmentation`]): Raw outputs of the model. target_sizes (`List[Tuple[int, int]]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction. If left to None, predictions will not be resized. Returns: `List[torch.Tensor]`: A list of length `batch_size`, where each item is a semantic segmentation map of shape (height, width) corresponding to the target_sizes entry (if `target_sizes` is specified). Each entry of each `torch.Tensor` correspond to a semantic class id. 
""" class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] # Remove the null class `[..., :-1]` masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1] masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Semantic segmentation logits of shape (batch_size, num_classes, height, width) segmentation = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs) batch_size = class_queries_logits.shape[0] # Resize logits and compute semantic segmentation maps if target_sizes is not None: if batch_size != len(target_sizes): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) semantic_segmentation = [] for idx in range(batch_size): resized_logits = torch.nn.functional.interpolate( segmentation[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False ) semantic_map = resized_logits[0].argmax(dim=0) semantic_segmentation.append(semantic_map) else: semantic_segmentation = segmentation.argmax(dim=1) semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])] return semantic_segmentation def post_process_instance_segmentation( self, outputs, task_type: str = "instance", is_demo: bool = True, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, target_sizes: Optional[List[Tuple[int, int]]] = None, return_coco_annotation: Optional[bool] = False, ): """ Converts the output of [`OneFormerForUniversalSegmentationOutput`] into image instance segmentation predictions. Only supports PyTorch. Args: outputs ([`OneFormerForUniversalSegmentationOutput`]): The outputs from [`OneFormerForUniversalSegmentationOutput`]. task_type (`str`, *optional)*, defaults to "instance"): The post processing depends on the task token input. If the `task_type` is "panoptic", we need to ignore the stuff predictions. is_demo (`bool`, *optional)*, defaults to `True`): Whether the model is in demo mode. If true, use threshold to predict final masks. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction in batch. If left to None, predictions will not be resized. return_coco_annotation (`bool`, *optional)*, defaults to `False`): Whether to return predictions in COCO format. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set to `None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized to the corresponding `target_sizes` entry. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- an integer representing the `segment_id`. 
- **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. Multiple instances of the same class / label were fused and assigned a single `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] batch_size = class_queries_logits.shape[0] num_queries = class_queries_logits.shape[1] num_classes = class_queries_logits.shape[-1] - 1 # Loop over items in batch size results: List[Dict[str, torch.Tensor]] = [] for i in range(batch_size): # [Q, K] scores = torch.nn.functional.softmax(class_queries_logits[i], dim=-1)[:, :-1] labels = torch.arange(num_classes).unsqueeze(0).repeat(num_queries, 1).flatten(0, 1) # scores_per_image, topk_indices = scores.flatten(0, 1).topk(self.num_queries, sorted=False) scores_per_image, topk_indices = scores.flatten(0, 1).topk(num_queries, sorted=False) labels_per_image = labels[topk_indices] topk_indices = torch_int_div(topk_indices, num_classes) # mask_pred = mask_pred.unsqueeze(1).repeat(1, self.sem_seg_head.num_classes, 1).flatten(0, 1) mask_pred = masks_queries_logits[i][topk_indices] # Only consider scores with confidence over [threshold] for demo if is_demo: keep = scores_per_image > threshold scores_per_image = scores_per_image[keep] labels_per_image = labels_per_image[keep] mask_pred = mask_pred[keep] # if this is panoptic segmentation, we only keep the "thing" classes if task_type == "panoptic": keep = torch.zeros_like(scores_per_image).bool() for i, lab in enumerate(labels_per_image): keep[i] = lab in self.metadata["thing_ids"] scores_per_image = scores_per_image[keep] labels_per_image = labels_per_image[keep] mask_pred = mask_pred[keep] if mask_pred.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_pred.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue if "ade20k" in self.class_info_file and not is_demo and "instance" in task_type: for i in range(labels_per_image.shape[0]): labels_per_image[i] = self.metadata["thing_ids"].index(labels_per_image[i].item()) # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_pred, scores_per_image, labels_per_image, mask_threshold, overlap_mask_area_threshold, set(), target_size, ) # Return segmentation map in run-length encoding (RLE) format if return_coco_annotation: segmentation = convert_segmentation_to_rle(segmentation) results.append({"segmentation": segmentation, "segments_info": segments}) return results # Copied from transformers.models.maskformer.image_processing_maskformer.MaskFormerImageProcessor.post_process_panoptic_segmentation def post_process_panoptic_segmentation( self, outputs, threshold: float = 0.5, mask_threshold: float = 0.5, overlap_mask_area_threshold: float = 0.8, label_ids_to_fuse: Optional[Set[int]] = None, target_sizes: Optional[List[Tuple[int, int]]] = None, ) -> List[Dict]: """ Converts the output of [`MaskFormerForInstanceSegmentationOutput`] into image panoptic segmentation predictions. Only supports PyTorch. 
Args: outputs ([`MaskFormerForInstanceSegmentationOutput`]): The outputs from [`MaskFormerForInstanceSegmentation`]. threshold (`float`, *optional*, defaults to 0.5): The probability score threshold to keep predicted instance masks. mask_threshold (`float`, *optional*, defaults to 0.5): Threshold to use when turning the predicted masks into binary values. overlap_mask_area_threshold (`float`, *optional*, defaults to 0.8): The overlap mask area threshold to merge or discard small disconnected parts within each binary instance mask. label_ids_to_fuse (`Set[int]`, *optional*): The labels in this state will have all their instances be fused together. For instance we could say there can only be one sky in an image, but several persons, so the label ID for sky would be in that set, but not the one for person. target_sizes (`List[Tuple]`, *optional*): List of length (batch_size), where each list item (`Tuple[int, int]]`) corresponds to the requested final size (height, width) of each prediction in batch. If left to None, predictions will not be resized. Returns: `List[Dict]`: A list of dictionaries, one per image, each dictionary containing two keys: - **segmentation** -- a tensor of shape `(height, width)` where each pixel represents a `segment_id`, set to `None` if no mask if found above `threshold`. If `target_sizes` is specified, segmentation is resized to the corresponding `target_sizes` entry. - **segments_info** -- A dictionary that contains additional information on each segment. - **id** -- an integer representing the `segment_id`. - **label_id** -- An integer representing the label / semantic class id corresponding to `segment_id`. - **was_fused** -- a boolean, `True` if `label_id` was in `label_ids_to_fuse`, `False` otherwise. Multiple instances of the same class / label were fused and assigned a single `segment_id`. - **score** -- Prediction score of segment with `segment_id`. """ if label_ids_to_fuse is None: logger.warning("`label_ids_to_fuse` unset. 
No instance will be fused.") label_ids_to_fuse = set() class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1] masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width] batch_size = class_queries_logits.shape[0] num_labels = class_queries_logits.shape[-1] - 1 mask_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width] # Predicted label and score of each query (batch_size, num_queries) pred_scores, pred_labels = nn.functional.softmax(class_queries_logits, dim=-1).max(-1) # Loop over items in batch size results: List[Dict[str, TensorType]] = [] for i in range(batch_size): mask_probs_item, pred_scores_item, pred_labels_item = remove_low_and_no_objects( mask_probs[i], pred_scores[i], pred_labels[i], threshold, num_labels ) # No mask found if mask_probs_item.shape[0] <= 0: height, width = target_sizes[i] if target_sizes is not None else mask_probs_item.shape[1:] segmentation = torch.zeros((height, width)) - 1 results.append({"segmentation": segmentation, "segments_info": []}) continue # Get segmentation map and segment information of batch item target_size = target_sizes[i] if target_sizes is not None else None segmentation, segments = compute_segments( mask_probs=mask_probs_item, pred_scores=pred_scores_item, pred_labels=pred_labels_item, mask_threshold=mask_threshold, overlap_mask_area_threshold=overlap_mask_area_threshold, label_ids_to_fuse=label_ids_to_fuse, target_size=target_size, ) results.append({"segmentation": segmentation, "segments_info": segments}) return results
1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
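A minimal sketch of the replacement pattern the description refers to; the tensor values below are invented, and the note that `torch_int_div` in `transformers.pytorch_utils` boils down to `torch.div(..., rounding_mode="floor")` is an assumption of this sketch rather than something shown in the diff.

import torch

topk_indices = torch.tensor([3, 7, 150, 151])  # made-up example values
num_classes = 150

# Deprecated pattern: floor division on a tensor triggers the __floordiv__ warning.
deprecated = topk_indices // num_classes

# Replacement pattern: explicit division with floor rounding, which torch.div supports.
replacement = torch.div(topk_indices, num_classes, rounding_mode="floor")

assert torch.equal(deprecated, replacement)  # both yield tensor([0, 0, 1, 1])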
./src/transformers/models/reformer/modeling_reformer.py
# coding=utf-8 # Copyright 2020 The Trax Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch REFORMER model.""" import sys from collections import namedtuple from dataclasses import dataclass from functools import reduce from operator import mul from typing import List, Optional, Tuple, Union import numpy as np import torch from torch import nn from torch.autograd.function import Function from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import CausalLMOutput, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward from ...utils import ( DUMMY_INPUTS, DUMMY_MASK, ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_reformer import ReformerConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "google/reformer-crime-and-punishment" _CONFIG_FOR_DOC = "ReformerConfig" REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "google/reformer-crime-and-punishment", "google/reformer-enwik8", # See all Reformer models at https://huggingface.co/models?filter=reformer ] # Define named tuples for nn.Modules here LSHSelfAttentionOutput = namedtuple("LSHSelfAttentionOutput", ["hidden_states", "attention_probs", "buckets"]) LocalSelfAttentionOutput = namedtuple("LocalSelfAttentionOutput", ["hidden_states", "attention_probs"]) AttentionOutput = namedtuple("AttentionOutput", ["hidden_states", "attention_probs", "buckets"]) ReformerOutput = namedtuple("ReformerOutput", ["hidden_states", "attn_output", "attention_probs", "buckets"]) ReformerBackwardOutput = namedtuple( "ReformerBackwardOutput", ["attn_output", "hidden_states", "grad_attn_output", "grad_hidden_states"] ) ReformerEncoderOutput = namedtuple( "ReformerEncoderOutput", ["hidden_states", "all_hidden_states", "all_attentions", "past_buckets_states"], ) def _stable_argsort(vector, dim): # this function scales the vector so that torch.argsort is stable. 
# torch.argsort is not stable on its own scale_offset = torch.arange(vector.shape[dim], device=vector.device).view(1, 1, -1) scale_offset = scale_offset.expand(vector.shape) scaled_vector = vector.shape[dim] * vector + (scale_offset % vector.shape[dim]) return torch.argsort(scaled_vector, dim=dim) def _get_least_common_mult_chunk_len(config): attn_types = config.attn_layers attn_types_set = set(attn_types) if len(attn_types_set) == 1 and attn_types[0] == "lsh": return config.lsh_attn_chunk_length elif len(attn_types_set) == 1 and attn_types[0] == "local": return config.local_attn_chunk_length elif len(attn_types_set) == 2 and attn_types_set == {"lsh", "local"}: return np.lcm(config.lsh_attn_chunk_length, config.local_attn_chunk_length) else: raise NotImplementedError( f"Only attn layer types 'lsh' and 'local' exist, but `config.attn_layers`: {config.attn_layers}. Select " "attn layer types from ['lsh', 'local'] only." ) def _get_min_chunk_len(config): attn_types = config.attn_layers attn_types_set = set(attn_types) if len(attn_types_set) == 1 and attn_types[0] == "lsh": return config.lsh_attn_chunk_length elif len(attn_types_set) == 1 and attn_types[0] == "local": return config.local_attn_chunk_length elif len(attn_types_set) == 2 and attn_types_set == {"lsh", "local"}: return min(config.lsh_attn_chunk_length, config.local_attn_chunk_length) else: raise NotImplementedError( f"Only attn layer types 'lsh' and 'local' exist, but `config.attn_layers`: {config.attn_layers}. Select " "attn layer types from ['lsh', 'local'] only." ) class AxialPositionEmbeddings(nn.Module): """ Constructs axial position embeddings. Useful for very long input sequences to save memory and time. """ def __init__(self, config): super().__init__() self.axial_pos_shape = config.axial_pos_shape self.axial_pos_embds_dim = config.axial_pos_embds_dim self.dropout = config.hidden_dropout_prob self.least_common_mult_chunk_length = _get_least_common_mult_chunk_len(config) self.weights = nn.ParameterList() if sum(self.axial_pos_embds_dim) != config.hidden_size: raise ValueError( f"Make sure that config.axial_pos_embds factors: {self.axial_pos_embds_dim} sum to " f"config.hidden_size: {config.hidden_size}" ) # create weights for axis, axial_pos_embd_dim in enumerate(self.axial_pos_embds_dim): # create expanded shapes ax_shape = [1] * len(self.axial_pos_shape) ax_shape[axis] = self.axial_pos_shape[axis] ax_shape = tuple(ax_shape) + (axial_pos_embd_dim,) # create tensor and init self.weights.append(nn.Parameter(torch.ones(ax_shape, dtype=torch.float32))) def forward(self, position_ids): # broadcast weights to correct shape batch_size = position_ids.shape[0] sequence_length = position_ids.shape[1] broadcasted_weights = [ weight.expand((batch_size,) + self.axial_pos_shape + weight.shape[-1:]) for weight in self.weights ] if self.training is True: if reduce(mul, self.axial_pos_shape) != sequence_length: raise ValueError( f"If training, make sure that config.axial_pos_shape factors: {self.axial_pos_shape} multiply to " f"sequence length. Got prod({self.axial_pos_shape}) != sequence_length: {sequence_length}. " f"You might want to consider padding your sequence length to {reduce(mul, self.axial_pos_shape)} " "or changing config.axial_pos_shape." 
) if self.dropout > 0: weights = torch.cat(broadcasted_weights, dim=-1) # permute weights so that 2D correctly drops dims 1 and 2 transposed_weights = weights.transpose(2, 1) # drop entire matrix of last two dims (prev dims 1 and 2) dropped_transposed_weights = nn.functional.dropout2d( transposed_weights, p=self.dropout, training=self.training ) dropped_weights = dropped_transposed_weights.transpose(2, 1) position_encodings = torch.reshape(dropped_weights, (batch_size, sequence_length, -1)) else: position_encodings = torch.cat( [torch.reshape(weight, (batch_size, sequence_length, -1)) for weight in broadcasted_weights], dim=-1, ) else: if reduce(mul, self.axial_pos_shape) < sequence_length: raise ValueError( f"Make sure that config.axial_pos_shape factors: {self.axial_pos_shape} multiply at least to " f"max(sequence_length, least_common_mult_chunk_length): max({sequence_length}, " f"{self.least_common_mult_chunk_length})." ) # compute how many columns are needed max_position_id = position_ids.max().item() required_pos_encodings_columns = -(-(max_position_id + 1) // self.axial_pos_shape[1]) # cut to columns that are needed position_encodings = torch.cat( [weight[:, :required_pos_encodings_columns] for weight in broadcasted_weights], dim=-1 ) position_encodings = torch.reshape(position_encodings, (batch_size, -1, position_encodings.shape[-1])) # select correct position encodings position_encodings = torch.cat( [ torch.index_select(position_encodings[i], 0, position_ids[i]).unsqueeze(0) for i in range(batch_size) ], dim=0, ) return position_encodings class PositionEmbeddings(nn.Module): """Constructs conventional position embeddings of shape `[max_pos_embeddings, hidden_size]`.""" def __init__(self, config): super().__init__() self.dropout = config.hidden_dropout_prob self.embedding = nn.Embedding(config.max_position_embeddings, config.hidden_size) def forward(self, position_ids): position_embeddings = self.embedding(position_ids) position_embeddings = nn.functional.dropout(position_embeddings, p=self.dropout, training=self.training) return position_embeddings class ReformerEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.max_position_embeddings = config.max_position_embeddings self.dropout = config.hidden_dropout_prob self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size) self.position_embeddings = ( AxialPositionEmbeddings(config) if config.axial_pos_embds else PositionEmbeddings(config) ) def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, start_idx_pos_encodings=0): if input_ids is not None: input_shape = input_ids.size() device = input_ids.device else: input_shape = inputs_embeds.size()[:-1] device = inputs_embeds.device seq_length = input_shape[1] if position_ids is None: position_ids = torch.arange( start_idx_pos_encodings, start_idx_pos_encodings + seq_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0).expand(input_shape) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if position_ids.shape[-1] > self.max_position_embeddings: raise ValueError( f"Sequence Length: {position_ids.shape[-1]} has to be less or equal than " f"config.max_position_embeddings {self.max_position_embeddings}." 
) # dropout embeddings = nn.functional.dropout(inputs_embeds, p=self.dropout, training=self.training) # add positional embeddings position_embeddings = self.position_embeddings(position_ids) embeddings = embeddings + position_embeddings return embeddings class EfficientAttentionMixin: """ A few utilities for nn.Modules in Reformer, to be used as a mixin. """ def _look_adjacent(self, vectors, num_chunks_before, num_chunks_after): """ Used to implement attention between consecutive chunks. Args: vectors: array of shape [batch_size, num_attention_heads, n_chunks, chunk_len, ...] num_chunks_before: chunks before current chunk to include in attention num_chunks_after: chunks after current chunk to include in attention Returns: tensor of shape [num_chunks, N * chunk_length, ...], where N = (1 + num_chunks_before + num_chunks_after). """ if num_chunks_before == 0 and num_chunks_after == 0: return vectors slices = [] for i in range(-num_chunks_before, num_chunks_after + 1): if i == 0: slices.append(vectors) else: slices.append(torch.cat([vectors[:, :, i:, ...], vectors[:, :, :i, ...]], dim=2)) return torch.cat(slices, dim=3) def _split_hidden_size_dim(self, x, num_attn_heads, attn_head_size): """ splits hidden_size dim into attn_head_size and num_attn_heads """ new_x_shape = x.size()[:-1] + (num_attn_heads, attn_head_size) x = x.view(*new_x_shape) return x.transpose(2, 1) def _merge_hidden_size_dims(self, x, num_attn_heads, attn_head_size): """ merges attn_head_size dim and num_attn_heads dim into hidden_size """ x = x.permute(0, 2, 1, 3) return torch.reshape(x, (x.size()[0], -1, num_attn_heads * attn_head_size)) def _split_seq_length_dim_to(self, vectors, dim_factor_1, dim_factor_2, num_attn_heads, attn_head_size=None): """ splits sequence length dim of vectors into `dim_factor_1` and `dim_factor_2` dims """ batch_size = vectors.shape[0] split_dim_shape = (batch_size, num_attn_heads, dim_factor_1, dim_factor_2) if len(vectors.shape) == 4: return torch.reshape(vectors, split_dim_shape + (attn_head_size,)) elif len(vectors.shape) == 3: return torch.reshape(vectors, split_dim_shape) else: raise ValueError(f"Input vector rank should be one of [3, 4], but is: {len(vectors.shape)}") class LSHSelfAttention(nn.Module, EfficientAttentionMixin): def __init__(self, config): super().__init__() self.config = config self.chunk_length = config.lsh_attn_chunk_length self.num_hashes = config.num_hashes self.num_buckets = config.num_buckets self.num_chunks_before = config.lsh_num_chunks_before self.num_chunks_after = config.lsh_num_chunks_after self.hash_seed = config.hash_seed self.is_decoder = config.is_decoder self.max_position_embeddings = config.max_position_embeddings self.dropout = config.lsh_attention_probs_dropout_prob self.num_attention_heads = config.num_attention_heads self.attention_head_size = config.attention_head_size self.all_head_size = self.num_attention_heads * self.attention_head_size self.hidden_size = config.hidden_size # projection matrices self.query_key = nn.Linear(self.hidden_size, self.all_head_size, bias=False) self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=False) # save mask value here. 
Need fp32 and fp16 mask values self.register_buffer("self_mask_value_float16", torch.tensor(-1e3)) self.register_buffer("self_mask_value_float32", torch.tensor(-1e5)) self.register_buffer("mask_value_float16", torch.tensor(-1e4)) self.register_buffer("mask_value_float32", torch.tensor(-1e9)) def forward( self, hidden_states, attention_mask=None, head_mask=None, num_hashes=None, buckets=None, past_buckets_states=None, use_cache=False, output_attentions=False, **kwargs, ): sequence_length = hidden_states.shape[1] batch_size = hidden_states.shape[0] # num hashes can optionally be overwritten by user num_hashes = num_hashes if num_hashes is not None else self.num_hashes do_cached_attention = use_cache and past_buckets_states[1] is not None # check if cache shall be used and that hidden states are already cached if do_cached_attention: assert sequence_length == 1, ( "At the moment, auto-regressive language generation is only possible one word at a time. Make sure" f" that input sequence length {sequence_length} equals 1, when `past_buckets_states` is passed." ) past_buckets = past_buckets_states[0] past_states = past_buckets_states[1] # get query vector query_vectors = self.query_key(hidden_states) query_vectors = self._split_hidden_size_dim( query_vectors, self.num_attention_heads, self.attention_head_size ) if past_buckets is not None: key_value_hidden_states, sorted_bucket_idx, buckets = self._get_relevant_hid_states_and_buckets( query_vectors=query_vectors, attention_mask=attention_mask, num_hashes=num_hashes, hidden_states=hidden_states, past_states=past_states, past_buckets=past_buckets, ) query_key_vectors = self._query_per_attn_head(key_value_hidden_states) value_vectors = self._value_per_attn_head(key_value_hidden_states) # split key & value vectors by num hashes to apply # self attention on each separately query_key_vectors = self._split_seq_length_dim_to( query_key_vectors, num_hashes, -1, self.num_attention_heads, self.attention_head_size, ) value_vectors = self._split_seq_length_dim_to( value_vectors, num_hashes, -1, self.num_attention_heads, self.attention_head_size, ) # repeat query vectors across hash dimension query_vectors = query_vectors.unsqueeze(2).repeat(1, 1, num_hashes, 1, 1) else: key_value_hidden_states = torch.cat([past_states, hidden_states], dim=1) query_key_vectors = self.query_key(key_value_hidden_states) value_vectors = self.value(key_value_hidden_states) else: # project hidden_states to query_key and value query_vectors = None query_key_vectors = self.query_key(hidden_states) value_vectors = self.value(hidden_states) # if query key is not already split if not do_cached_attention or past_buckets is None: query_key_vectors = self._split_hidden_size_dim( query_key_vectors, self.num_attention_heads, self.attention_head_size ) value_vectors = self._split_hidden_size_dim( value_vectors, self.num_attention_heads, self.attention_head_size ) # cache buckets for next incremental decoding if do_cached_attention and past_buckets is None and key_value_hidden_states.shape[1] >= self.chunk_length: buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask) # free memory del hidden_states assert ( query_key_vectors.shape[-1] == self.attention_head_size ), f"last dim of query_key_vectors is {query_key_vectors.shape[-1]} but should be {self.attention_head_size}." assert ( value_vectors.shape[-1] == self.attention_head_size ), f"last dim of value_vectors is {value_vectors.shape[-1]} but should be {self.attention_head_size}." 
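        # NOTE (illustrative): the flag computed below switches between plain O(n^2) self-attention and
        # LSH-bucketed attention. Bucketing only pays off when the sequence spans more than one chunk;
        # e.g. with a chunk length of 64, a 48-token input is handled by standard attention, and cached
        # one-token-at-a-time decoding likewise skips the hashing / sorting / chunking path.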
do_standard_self_attention = (sequence_length <= self.chunk_length) or ( use_cache and past_buckets_states[1] is not None ) # LSH attention only makes sense if chunked attention should be performed if not do_standard_self_attention: # set `num_buckets` on the fly, recommended way to do it if self.num_buckets is None: self._set_num_buckets(sequence_length) # use cached buckets for backprop only if buckets is None: # hash query key vectors into buckets buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask) else: # make sure buckets has correct shape for LSH attention buckets = buckets.view(batch_size, self.num_attention_heads, num_hashes * sequence_length) assert ( int(buckets.shape[-1]) == num_hashes * sequence_length ), f"last dim of buckets is {buckets.shape[-1]}, but should be {num_hashes * sequence_length}" sorted_bucket_idx, undo_sorted_bucket_idx = self._get_sorted_bucket_idx_and_undo_sorted_bucket_idx( sequence_length, buckets, num_hashes ) # make sure bucket idx is not longer then sequence length sorted_bucket_idx_per_hash = sorted_bucket_idx % sequence_length # cluster query key value vectors according to hashed buckets query_key_vectors = self._gather_by_expansion(query_key_vectors, sorted_bucket_idx_per_hash, num_hashes) value_vectors = self._gather_by_expansion(value_vectors, sorted_bucket_idx_per_hash, num_hashes) query_key_vectors = self._split_seq_length_dim_to( query_key_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size, ) value_vectors = self._split_seq_length_dim_to( value_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size, ) if self.chunk_length is None: assert self.num_chunks_before == 0 and self.num_chunks_after == 0, ( "If `config.chunk_length` is `None`, make sure `config.num_chunks_after` and" " `config.num_chunks_before` are set to 0." 
) elif do_cached_attention and past_buckets is not None: # use max sequence length sorted_bucket_idx_per_hash = sorted_bucket_idx else: # get sequence length indices sorted_bucket_idx_per_hash = torch.arange(sequence_length, device=query_key_vectors.device).repeat( batch_size, self.num_attention_heads, 1 ) # scale key vectors sqrt_num = np.sqrt(self.attention_head_size) key_vectors = self._len_and_dim_norm(query_key_vectors, sqrt_num) # set query_vectors to query key vectors if LSH self attention query_vectors = query_vectors if query_vectors is not None else query_key_vectors # free memory del query_key_vectors # get attention probs out_vectors, logits, attention_probs = self._attend( query_vectors=query_vectors, key_vectors=key_vectors, value_vectors=value_vectors, sorted_bucket_idx_per_hash=sorted_bucket_idx_per_hash, attention_mask=attention_mask, head_mask=head_mask, do_standard_self_attention=do_standard_self_attention, do_cached_attention=do_cached_attention, ) # free memory del key_vectors, value_vectors # re-order out_vectors and logits if not do_standard_self_attention: # sort clusters back to correct ordering out_vectors, logits = ReverseSort.apply(out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx) if not do_standard_self_attention or (do_cached_attention and past_buckets is not None): # sum up all hash rounds if num_hashes > 1: out_vectors = self._split_seq_length_dim_to( out_vectors, num_hashes, sequence_length, self.num_attention_heads, self.attention_head_size, ) logits = self._split_seq_length_dim_to( logits, num_hashes, sequence_length, self.num_attention_heads, self.attention_head_size, ).unsqueeze(-1) probs_vectors = torch.exp(logits - torch.logsumexp(logits, dim=2, keepdim=True)) out_vectors = torch.sum(out_vectors * probs_vectors, dim=2) # free memory del probs_vectors # free memory del logits assert out_vectors.shape == ( batch_size, self.num_attention_heads, sequence_length, self.attention_head_size, ), ( "out_vectors have be of shape `[batch_size, config.num_attention_heads, sequence_length," " config.attention_head_size]`." ) out_vectors = self._merge_hidden_size_dims(out_vectors, self.num_attention_heads, self.attention_head_size) if output_attentions is False: attention_probs = () if buckets is not None: buckets = buckets.view(batch_size, self.num_attention_heads, num_hashes, -1) return LSHSelfAttentionOutput(hidden_states=out_vectors, attention_probs=attention_probs, buckets=buckets) def _query_per_attn_head(self, hidden_states): per_head_query_key = self.query_key.weight.reshape( self.num_attention_heads, self.attention_head_size, self.hidden_size ).transpose(-2, -1) # only relevant for inference and no bias => we can use einsum here query_key_vectors = torch.einsum("balh,ahr->balr", hidden_states, per_head_query_key) return query_key_vectors def _value_per_attn_head(self, hidden_states): per_head_value = self.value.weight.reshape( self.num_attention_heads, self.attention_head_size, self.hidden_size ).transpose(-2, -1) # only relevant for inference and no bias => we can use einsum here value_vectors = torch.einsum("balh,ahr->balr", hidden_states, per_head_value) return value_vectors def _hash_vectors(self, vectors, num_hashes, attention_mask, increase_num_buckets=False): batch_size = vectors.shape[0] # See https://arxiv.org/pdf/1509.02897.pdf # We sample a different random rotation for each round of hashing to # decrease the probability of hash misses. 
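        # Sketch of the angular LSH scheme used below (see the paper linked above): each query/key vector
        # is projected onto `rotation_size // 2` random directions, the projections are concatenated with
        # their negations, and the argmax of the result is taken as the bucket id.
        # Illustrative example with num_buckets = 4: projections [0.3, -0.8] become [0.3, -0.8, -0.3, 0.8]
        # and argmax selects bucket 3. Vectors pointing in similar directions are therefore likely to land
        # in the same bucket.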
if isinstance(self.num_buckets, int): assert ( self.num_buckets % 2 == 0 ), f"There should be an even number of buckets, but `self.num_buckets`: {self.num_buckets}" rotation_size = self.num_buckets num_buckets = self.num_buckets else: # Factorize the hash if self.num_buckets is a list or tuple rotation_size, num_buckets = 0, 1 for bucket_factor in self.num_buckets: assert ( bucket_factor % 2 == 0 ), f"The number of buckets should be even, but `num_bucket`: {bucket_factor}" rotation_size = rotation_size + bucket_factor num_buckets = num_buckets * bucket_factor # remove gradient vectors = vectors.detach() if self.hash_seed is not None: # for determinism torch.manual_seed(self.hash_seed) rotations_shape = (self.num_attention_heads, vectors.shape[-1], num_hashes, rotation_size // 2) # create a random self.attention_head_size x num_hashes x num_buckets/2 random_rotations = torch.randn(rotations_shape, device=vectors.device, dtype=vectors.dtype) # Output dim: Batch_Size x Num_Attn_Heads x Num_Hashes x Seq_Len x Num_Buckets/2 rotated_vectors = torch.einsum("bmtd,mdhr->bmhtr", vectors, random_rotations) if isinstance(self.num_buckets, int) or len(self.num_buckets) == 1: rotated_vectors = torch.cat([rotated_vectors, -rotated_vectors], dim=-1) buckets = torch.argmax(rotated_vectors, dim=-1) else: # Get the buckets for them and combine. buckets, cur_sum, cur_product = None, 0, 1 for bucket_factor in self.num_buckets: rotated_vectors_factor = rotated_vectors[..., cur_sum : cur_sum + (bucket_factor // 2)] cur_sum = cur_sum + bucket_factor // 2 rotated_vectors_factor = torch.cat([rotated_vectors_factor, -rotated_vectors_factor], dim=-1) if buckets is None: buckets = torch.argmax(rotated_vectors_factor, dim=-1) else: buckets = buckets + (cur_product * torch.argmax(rotated_vectors_factor, dim=-1)) cur_product = cur_product * bucket_factor if attention_mask is not None and (attention_mask.sum().item() < batch_size * attention_mask.shape[-1]): # add an extra bucket for padding tokens only num_buckets = num_buckets + 1 # assign padding tokens extra bucket buckets_mask = attention_mask.to(torch.bool)[:, None, None, :].expand(buckets.shape) buckets = torch.where( buckets_mask, buckets, torch.tensor(num_buckets - 1, dtype=torch.long, device=buckets.device) ) elif increase_num_buckets: num_buckets = num_buckets + 1 # buckets is now (Batch_size x Num_Attn_Heads x Num_Hashes x Seq_Len). # Next we add offsets so that bucket numbers from different hashing rounds don't overlap. 
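        # Illustrative example of the offsets computed below: with num_buckets = 4 and num_hashes = 2 the
        # offsets are [0, 4], so buckets from hashing round 0 stay in [0, 3] while buckets from round 1
        # are shifted into [4, 7]. After flattening the hash and sequence dimensions, bucket ids from
        # different rounds can therefore never collide.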
offsets = torch.arange(num_hashes, device=vectors.device) offsets = (offsets * num_buckets).view((1, 1, -1, 1)) # expand to batch size and num attention heads offsets = offsets.expand((batch_size, self.num_attention_heads) + offsets.shape[-2:]) offset_buckets = (buckets + offsets).flatten(start_dim=2, end_dim=3) return offset_buckets def _get_sorted_bucket_idx_and_undo_sorted_bucket_idx(self, sequence_length, buckets, num_hashes): # no gradients are needed with torch.no_grad(): # hash-based sort sorted_bucket_idx = _stable_argsort(buckets, dim=-1) # create simple indices to scatter to, to have undo sort indices = ( torch.arange(sorted_bucket_idx.shape[-1], device=buckets.device) .view(1, 1, -1) .expand(sorted_bucket_idx.shape) ) # get undo sort undo_sorted_bucket_idx = sorted_bucket_idx.new(*sorted_bucket_idx.size()) undo_sorted_bucket_idx.scatter_(-1, sorted_bucket_idx, indices) return sorted_bucket_idx, undo_sorted_bucket_idx def _set_num_buckets(self, sequence_length): # `num_buckets` should be set to 2 * sequence_length // chunk_length as recommended in paper num_buckets_pow_2 = (2 * (sequence_length // self.chunk_length)).bit_length() - 1 # make sure buckets are power of 2 num_buckets = 2**num_buckets_pow_2 # factorize `num_buckets` if `num_buckets` becomes too large num_buckets_limit = 2 * max( int((self.max_position_embeddings // self.chunk_length) ** (0.5)), self.chunk_length, ) if num_buckets > num_buckets_limit: num_buckets = [2 ** (num_buckets_pow_2 // 2), 2 ** (num_buckets_pow_2 - num_buckets_pow_2 // 2)] logger.warning(f"config.num_buckets is not set. Setting config.num_buckets to {num_buckets}...") # set num buckets in config to be properly saved self.config.num_buckets = num_buckets self.num_buckets = num_buckets def _attend( self, query_vectors, key_vectors, value_vectors, sorted_bucket_idx_per_hash, attention_mask, head_mask, do_standard_self_attention, do_cached_attention, ): # look at previous and following chunks if chunked attention if not do_standard_self_attention: key_vectors = self._look_adjacent(key_vectors, self.num_chunks_before, self.num_chunks_after) value_vectors = self._look_adjacent(value_vectors, self.num_chunks_before, self.num_chunks_after) # get logits and dots # (BS, NumAttn, NumHash x NumChunk, Chunk_L x Hidden),(BS, NumAttn, NumHash x NumChunk, Chunk_L * (Num_bef + Num_aft + 1) x Hidden) -> (BS, NumAttn, NumHash x NumChunk, Chunk_L, Chunk_L * (1 + Num_bef + Num_aft)) query_key_dots = torch.matmul(query_vectors, key_vectors.transpose(-1, -2)) # free memory del query_vectors, key_vectors # if chunked attention split bucket idxs to query and key if not do_standard_self_attention: query_bucket_idx = self._split_seq_length_dim_to( sorted_bucket_idx_per_hash, -1, self.chunk_length, self.num_attention_heads ) key_value_bucket_idx = self._look_adjacent(query_bucket_idx, self.num_chunks_before, self.num_chunks_after) elif do_cached_attention and query_key_dots.ndim > 4: key_value_bucket_idx = sorted_bucket_idx_per_hash query_bucket_idx = ( key_value_bucket_idx.new_ones(key_value_bucket_idx.shape[:-1] + (1,)) * key_value_bucket_idx.max() ) elif do_cached_attention and query_key_dots.ndim <= 4: query_bucket_idx = (query_key_dots.shape[-1] - 1) * torch.ones_like(query_key_dots)[:, :, :, -1] key_value_bucket_idx = torch.arange( query_key_dots.shape[-1], dtype=torch.long, device=query_key_dots.device )[None, None, :].expand(query_bucket_idx.shape[:2] + (-1,)) else: query_bucket_idx = key_value_bucket_idx = sorted_bucket_idx_per_hash # get correct mask values 
depending on precision if query_key_dots.dtype == torch.float16: self_mask_value = self.self_mask_value_float16.half() mask_value = self.mask_value_float16.half() else: self_mask_value = self.self_mask_value_float32 mask_value = self.mask_value_float32 if not do_cached_attention: mask = self._compute_attn_mask( query_bucket_idx, key_value_bucket_idx, attention_mask, query_key_dots.shape, do_standard_self_attention, ) if mask is not None: query_key_dots = torch.where(mask, query_key_dots, mask_value) # free memory del mask # Self mask is ALWAYS applied. # From the reformer paper (https://arxiv.org/pdf/2001.04451.pdf): # " While attention to the future is not allowed, typical implementations of the # Transformer do allow a position to attend to itself. # Such behavior is undesirable in a shared-QK formulation because the dot-product # of a query vector with itself will almost always be greater than the dot product of a # query vector with a vector at another position. We therefore modify the masking # to forbid a token from attending to itself, except in situations # where a token has no other valid attention targets (e.g. the first token in a sequence) " self_mask = torch.ne(query_bucket_idx.unsqueeze(-1), key_value_bucket_idx.unsqueeze(-2)).to( query_bucket_idx.device ) # apply self_mask query_key_dots = torch.where(self_mask, query_key_dots, self_mask_value) # free memory del self_mask logits = torch.logsumexp(query_key_dots, dim=-1, keepdim=True) # dots shape is `[batch_size, num_attn_heads, num_hashes * seq_len // chunk_length, chunk_length, chunk_length * (1 + num_chunks_before + num_chunks_after)]` attention_probs = torch.exp(query_key_dots - logits) # free memory del query_key_dots # dropout attention_probs = nn.functional.dropout(attention_probs, p=self.dropout, training=self.training) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask # attend values out_vectors = torch.matmul(attention_probs, value_vectors) # free memory del value_vectors # merge chunk length if out_vectors.ndim > 4: logits = logits.flatten(start_dim=2, end_dim=3).squeeze(-1) out_vectors = out_vectors.flatten(start_dim=2, end_dim=3) return out_vectors, logits, attention_probs def _compute_attn_mask( self, query_indices, key_indices, attention_mask, query_key_dot_shape, do_standard_self_attention ): # attention mask for LSH if attention_mask is not None: # if chunked attention, the attention mask has to correspond to LSH order attention_mask = attention_mask.to(torch.bool)[:, None, :] if not do_standard_self_attention: # expand attn_mask to fit with key_value_bucket_idx shape attention_mask = attention_mask[:, None, :] attention_mask = attention_mask.expand(query_indices.shape[:-1] + (-1,)) # extract attention mask from LSH sorted key_indices attention_mask = torch.gather(attention_mask, -1, key_indices) attention_mask = attention_mask.unsqueeze(-2).expand(query_key_dot_shape) # Causal mask if self.is_decoder is True: causal_mask = torch.ge(query_indices.unsqueeze(-1), key_indices.unsqueeze(-2)).to(query_indices.device) # add attention mask if not None if attention_mask is not None: attention_mask = causal_mask * attention_mask else: attention_mask = causal_mask return attention_mask def _get_relevant_hid_states_and_buckets( self, query_vectors, attention_mask, num_hashes, hidden_states, past_states, past_buckets ): # concat hidden states hidden_states = torch.cat([past_states, hidden_states], dim=1) # batch_size hidden batch_size = hidden_states.shape[0] 
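        # At this point `hidden_states` is the concatenation of all cached states and the single new token
        # (incremental decoding), so the `sequence_length` read below equals past_length + 1.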
sequence_length = hidden_states.shape[1] # check if cached buckets include pad bucket max_bucket = self.num_buckets if isinstance(self.num_buckets, int) else reduce(mul, self.num_buckets) # if pad bucket was cached => need to increase num buckets for caching increase_num_buckets = past_buckets.max() > num_hashes * max_bucket - 1 # retrieve query buckets query_buckets = self._hash_vectors( query_vectors, num_hashes, attention_mask, increase_num_buckets=increase_num_buckets ) # concat buckets concat_buckets = torch.cat([past_buckets, query_buckets.unsqueeze(-1)], dim=-1) # hash-based sort bucket_idx = _stable_argsort(concat_buckets, dim=-1) # bucket_idx has shape: BatchSize x NumAttnHeads x NumHashes x SequenceLength assert bucket_idx.shape == ( batch_size, self.num_attention_heads, num_hashes, sequence_length, ), ( f"bucket_idx should have shape {(batch_size, self.num_attention_heads, num_hashes, sequence_length)}, but" f" has shape {bucket_idx.shape}." ) # find indices of new bucket indices relevant_bucket_idx = (bucket_idx == (bucket_idx.shape[-1] - 1)).nonzero() # expand relevant bucket indices to its chunks relevant_bucket_idx_chunk = self._expand_to_indices_in_relevant_chunk(relevant_bucket_idx, sequence_length) relevant_bucket_idx_chunk = bucket_idx[tuple(relevant_bucket_idx_chunk.transpose(0, 1))] # adapt bucket_idx for batch and hidden states for index select bucket_idx_batch_offset = sequence_length * ( batch_size * torch.arange(relevant_bucket_idx_chunk.shape[-1], device=hidden_states.device, dtype=torch.long) // relevant_bucket_idx_chunk.shape[-1] ) # add batch offset relevant_bucket_idx_chunk_all_batch = relevant_bucket_idx_chunk + bucket_idx_batch_offset hidden_states = hidden_states.reshape((-1, self.hidden_size)) # select all relevant hidden states relevant_hidden_states = hidden_states.index_select(0, relevant_bucket_idx_chunk_all_batch) # reshape hidden states and bucket_idx to correct output relevant_hidden_states = relevant_hidden_states.reshape( batch_size, self.num_attention_heads, -1, self.hidden_size ) relevant_bucket_idx_chunk = relevant_bucket_idx_chunk.reshape( batch_size, self.num_attention_heads, num_hashes, -1 ) assert ( relevant_hidden_states.shape[2] == (self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length * num_hashes ), ( "There should be" f" {(self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length * num_hashes} `hidden_states`," f" there are {relevant_hidden_states.shape[2]} `hidden_states`." ) assert ( relevant_bucket_idx_chunk.shape[-1] == (self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length ), ( "There should be" f" {(self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length} `hidden_states`, there are" f" {relevant_bucket_idx_chunk.shape[-1]} `bucket_idx`." 
) return relevant_hidden_states, relevant_bucket_idx_chunk, query_buckets def _expand_to_indices_in_relevant_chunk(self, indices, sequence_length): # get relevant indices of where chunk starts and its size start_indices_chunk = ((indices[:, -1] // self.chunk_length) - self.num_chunks_before) * self.chunk_length total_chunk_size = self.chunk_length * (1 + self.num_chunks_before + self.num_chunks_after) # expand start indices and add correct chunk offset via arange expanded_start_indices = start_indices_chunk.unsqueeze(-1).expand(indices.shape[0], total_chunk_size) chunk_sequence_indices = expanded_start_indices + torch.arange( total_chunk_size, device=indices.device, dtype=torch.long ).unsqueeze(0).expand(indices.shape[0], total_chunk_size) # make sure that circular logic holds via % seq len chunk_sequence_indices = chunk_sequence_indices.flatten() % sequence_length # expand indices and set indices correctly indices = indices.unsqueeze(1).expand((indices.shape[0], total_chunk_size, -1)).flatten(0, 1).clone() indices[:, -1] = chunk_sequence_indices return indices def _len_and_dim_norm(self, vectors, sqrt_num): """ length and attention head size dim normalization """ vectors = self._len_norm(vectors) vectors = vectors / sqrt_num return vectors def _len_norm(self, x, epsilon=1e-6): """ length normalization """ variance = torch.mean(x**2, -1, keepdim=True) norm_x = x * torch.rsqrt(variance + epsilon) return norm_x def _gather_by_expansion(self, vectors, idxs, num_hashes): """ expand dims of idxs and vectors for all hashes and gather """ expanded_idxs = idxs.unsqueeze(-1).expand(-1, -1, -1, self.attention_head_size) vectors = vectors.repeat(1, 1, num_hashes, 1) return torch.gather(vectors, 2, expanded_idxs) class ReverseSort(Function): """ After chunked attention is applied which sorted clusters, original ordering has to be restored. Since customized backward function is used for Reformer, the gradients of the output vectors have to be explicitly sorted here. 
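    In the forward pass the `undo_sorted_bucket_idx` indices restore the original token order of the output vectors
    and logits; in the backward pass the saved `sorted_bucket_idx` indices re-apply the sort so that the incoming
    gradients are routed back to the chunk-sorted layout used by the attention computation.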
""" @staticmethod def forward(ctx, out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx): # save sorted_bucket_idx for backprop with torch.no_grad(): ctx.sorted_bucket_idx = sorted_bucket_idx # undo sort to have correct order for next layer expanded_undo_sort_indices = undo_sorted_bucket_idx.unsqueeze(-1).expand(out_vectors.shape) out_vectors = torch.gather(out_vectors, 2, expanded_undo_sort_indices) logits = torch.gather(logits, 2, undo_sorted_bucket_idx) return out_vectors, logits @staticmethod def backward(ctx, grad_out_vectors, grad_logits): # get parameters saved in ctx sorted_bucket_idx = ctx.sorted_bucket_idx expanded_sort_indices = sorted_bucket_idx.unsqueeze(-1).expand(grad_out_vectors.shape) # reverse sort of forward grad_out_vectors = torch.gather(grad_out_vectors, 2, expanded_sort_indices) grad_logits = torch.gather(grad_logits, 2, sorted_bucket_idx) # return grad and `None` fillers for last 2 forward args return grad_out_vectors, grad_logits, None, None class LocalSelfAttention(nn.Module, EfficientAttentionMixin): def __init__(self, config): super().__init__() self.num_attention_heads = config.num_attention_heads self.chunk_length = config.local_attn_chunk_length self.num_chunks_before = config.local_num_chunks_before self.num_chunks_after = config.local_num_chunks_after self.is_decoder = config.is_decoder self.pad_token_id = config.pad_token_id self.attention_head_size = config.attention_head_size self.all_head_size = self.num_attention_heads * self.attention_head_size self.hidden_size = config.hidden_size # projection matrices self.query = nn.Linear(self.hidden_size, self.all_head_size, bias=False) self.key = nn.Linear(self.hidden_size, self.all_head_size, bias=False) self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=False) self.dropout = config.local_attention_probs_dropout_prob # save mask value here self.register_buffer("mask_value_float16", torch.tensor(-1e4)) self.register_buffer("mask_value_float32", torch.tensor(-1e9)) def forward( self, hidden_states, attention_mask=None, head_mask=None, past_buckets_states=None, use_cache=False, output_attentions=False, **kwargs, ): sequence_length = hidden_states.shape[1] batch_size = hidden_states.shape[0] # check if cache shall be used and that hidden states are already cached if use_cache and past_buckets_states[1] is not None: assert past_buckets_states[0] is None, ( "LocalSelfAttention should not make use of `buckets`. There seems to be an error when caching" " hidden_states_and_buckets." 
) key_value_hidden_states = self._retrieve_relevant_hidden_states( past_buckets_states[1], self.chunk_length, self.num_chunks_before ) key_value_hidden_states = torch.cat([key_value_hidden_states, hidden_states], dim=1) # only query vector for last token query_vectors = self.query(hidden_states) # compute key and value for relevant chunk key_vectors = self.key(key_value_hidden_states) value_vectors = self.value(key_value_hidden_states) # free memory del key_value_hidden_states else: # project hidden_states to query, key and value query_vectors = self.query(hidden_states) key_vectors = self.key(hidden_states) value_vectors = self.value(hidden_states) # split last dim into `config.num_attention_heads` and `config.attention_head_size` query_vectors = self._split_hidden_size_dim(query_vectors, self.num_attention_heads, self.attention_head_size) key_vectors = self._split_hidden_size_dim(key_vectors, self.num_attention_heads, self.attention_head_size) value_vectors = self._split_hidden_size_dim(value_vectors, self.num_attention_heads, self.attention_head_size) assert ( query_vectors.shape[-1] == self.attention_head_size ), f"last dim of query_key_vectors is {query_vectors.shape[-1]} but should be {self.attention_head_size}." assert ( key_vectors.shape[-1] == self.attention_head_size ), f"last dim of query_key_vectors is {key_vectors.shape[-1]} but should be {self.attention_head_size}." assert ( value_vectors.shape[-1] == self.attention_head_size ), f"last dim of query_key_vectors is {value_vectors.shape[-1]} but should be {self.attention_head_size}." if self.chunk_length is None: assert self.num_chunks_before == 0 and self.num_chunks_after == 0, ( "If `config.chunk_length` is `None`, make sure `config.num_chunks_after` and" " `config.num_chunks_before` are set to 0." 
) # normalize key vectors key_vectors = key_vectors / np.sqrt(self.attention_head_size) # get sequence length indices indices = torch.arange(sequence_length, device=query_vectors.device).repeat( batch_size, self.num_attention_heads, 1 ) # if one should do normal n^2 self-attention do_standard_self_attention = sequence_length <= self.chunk_length # if input should be chunked if not do_standard_self_attention: # chunk vectors # B x Num_Attn_Head x Seq_Len // chunk_len x chunk_len x attn_head_size query_vectors = self._split_seq_length_dim_to( query_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size, ) key_vectors = self._split_seq_length_dim_to( key_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size, ) value_vectors = self._split_seq_length_dim_to( value_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size, ) # chunk indices query_indices = self._split_seq_length_dim_to(indices, -1, self.chunk_length, self.num_attention_heads) key_indices = self._split_seq_length_dim_to(indices, -1, self.chunk_length, self.num_attention_heads) # append chunks before and after key_vectors = self._look_adjacent(key_vectors, self.num_chunks_before, self.num_chunks_after) value_vectors = self._look_adjacent(value_vectors, self.num_chunks_before, self.num_chunks_after) key_indices = self._look_adjacent(key_indices, self.num_chunks_before, self.num_chunks_after) else: query_indices = key_indices = indices # query-key matmul: QK^T query_key_dots = torch.matmul(query_vectors, key_vectors.transpose(-1, -2)) # free memory del query_vectors, key_vectors mask = self._compute_attn_mask( query_indices, key_indices, attention_mask, query_key_dots.shape, do_standard_self_attention ) if mask is not None: # get mask tensor depending on half precision or not if query_key_dots.dtype == torch.float16: mask_value = self.mask_value_float16.half() else: mask_value = self.mask_value_float32 query_key_dots = torch.where(mask, query_key_dots, mask_value) # free memory del mask # softmax logits = torch.logsumexp(query_key_dots, dim=-1, keepdim=True) attention_probs = torch.exp(query_key_dots - logits) # free memory del logits # dropout attention_probs = nn.functional.dropout(attention_probs, p=self.dropout, training=self.training) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask # attend values out_vectors = torch.matmul(attention_probs, value_vectors) # free memory del value_vectors # merge chunk length if not do_standard_self_attention: out_vectors = out_vectors.flatten(start_dim=2, end_dim=3) assert out_vectors.shape == ( batch_size, self.num_attention_heads, sequence_length, self.attention_head_size, ) out_vectors = self._merge_hidden_size_dims(out_vectors, self.num_attention_heads, self.attention_head_size) if output_attentions is False: attention_probs = () return LocalSelfAttentionOutput(hidden_states=out_vectors, attention_probs=attention_probs) def _compute_attn_mask( self, query_indices, key_indices, attention_mask, query_key_dots_shape, do_standard_self_attention ): # chunk attention mask and look before and after if attention_mask is not None: attention_mask = attention_mask.to(torch.bool)[:, None, :] if not do_standard_self_attention: attention_mask = self._split_seq_length_dim_to(attention_mask, -1, self.chunk_length, 1) attention_mask = self._look_adjacent(attention_mask, self.num_chunks_before, self.num_chunks_after) # create attn_mask attention_mask = 
attention_mask.unsqueeze(-2).expand(query_key_dots_shape) # Causal mask if self.is_decoder is True: causal_mask = torch.ge(query_indices.unsqueeze(-1), key_indices.unsqueeze(-2)).to(query_indices.device) # add attention mask if not None if attention_mask is not None: attention_mask = causal_mask * attention_mask else: attention_mask = causal_mask return attention_mask @staticmethod def _retrieve_relevant_hidden_states(previous_hidden_states, chunk_length, num_chunks_before): start_position = ((previous_hidden_states.shape[1] // chunk_length) - num_chunks_before) * chunk_length return previous_hidden_states[:, start_position:] class ReformerSelfOutput(nn.Module): def __init__(self, config): super().__init__() all_head_size = config.num_attention_heads * config.attention_head_size self.dropout = config.hidden_dropout_prob self.dense = nn.Linear(all_head_size, config.hidden_size, bias=False) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) return hidden_states class ReformerAttention(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.layer_id = layer_id self.attn_layers = config.attn_layers self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if len(set(self.attn_layers)) == 1 and self.attn_layers[0] == "lsh": self.self_attention = LSHSelfAttention(config) elif len(set(self.attn_layers)) == 1 and self.attn_layers[0] == "local": self.self_attention = LocalSelfAttention(config) elif len(set(self.attn_layers)) == 2 and set(self.attn_layers) == {"lsh", "local"}: # get correct attn layers if self.attn_layers[self.layer_id] == "lsh": self.self_attention = LSHSelfAttention(config) else: self.self_attention = LocalSelfAttention(config) else: raise NotImplementedError( f"Only attn layer types 'lsh' and 'local' exist, but got `config.attn_layers`: {self.attn_layers}. " "Select attn layer types from ['lsh', 'local'] only." 
) self.output = ReformerSelfOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, num_hashes=None, past_buckets_states=None, use_cache=False, orig_sequence_length=None, output_attentions=False, buckets=None, ): hidden_states = self.layer_norm(hidden_states) # make sure cached hidden states is set to None for backward pass if past_buckets_states is not None: past_buckets_states_layer = past_buckets_states[self.layer_id] else: past_buckets_states_layer = None # use cached buckets for backprob if buckets not None for LSHSelfAttention self_attention_outputs = self.self_attention( hidden_states=hidden_states, head_mask=head_mask, attention_mask=attention_mask, num_hashes=num_hashes, past_buckets_states=past_buckets_states_layer, use_cache=use_cache, output_attentions=output_attentions, buckets=buckets, ) # add buckets if necessary if hasattr(self_attention_outputs, "buckets"): buckets = self_attention_outputs.buckets else: buckets = None # cache hidden states for future use if use_cache: if past_buckets_states[self.layer_id][0] is None: # padded input should not be cached past_buckets = ( buckets[:, :, :, :orig_sequence_length] if (buckets is not None and orig_sequence_length > 1) else buckets ) else: past_buckets = torch.cat([past_buckets_states[self.layer_id][0], buckets], dim=-1) if past_buckets_states[self.layer_id][1] is None: # padded input should not be cached past_states = hidden_states[:, :orig_sequence_length] else: past_states = torch.cat([past_buckets_states[self.layer_id][1], hidden_states], dim=1) past_buckets_states[self.layer_id] = (past_buckets, past_states) # compute attention feed forward output attention_output = self.output(self_attention_outputs.hidden_states) return AttentionOutput( hidden_states=attention_output, attention_probs=self_attention_outputs.attention_probs, buckets=buckets, ) class ReformerFeedForwardDense(nn.Module): def __init__(self, config): super().__init__() self.dropout = config.hidden_dropout_prob if isinstance(config.hidden_act, str): self.act_fn = ACT2FN[config.hidden_act] else: self.act_fn = config.hidden_act self.dense = nn.Linear(config.hidden_size, config.feed_forward_size) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = self.act_fn(hidden_states) return hidden_states class ReformerFeedForwardOutput(nn.Module): def __init__(self, config): super().__init__() self.dropout = config.hidden_dropout_prob self.dense = nn.Linear(config.feed_forward_size, config.hidden_size) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) return hidden_states class ChunkReformerFeedForward(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dense = ReformerFeedForwardDense(config) self.output = ReformerFeedForwardOutput(config) def forward(self, attention_output): return apply_chunking_to_forward( self.forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output, ) def forward_chunk(self, hidden_states): hidden_states = self.layer_norm(hidden_states) hidden_states = self.dense(hidden_states) return self.output(hidden_states) class ReformerLayer(nn.Module): def __init__(self, config, layer_id=0): 
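        # ReformerLayer is a reversible (RevNet-style) residual block: the forward pass computes
        # Y_1 = X_1 + Attention(X_2) and Y_2 = X_2 + FeedForward(Y_1), so `backward_pass` can
        # reconstruct the layer inputs from its outputs instead of storing intermediate activations.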
super().__init__() self.attention = ReformerAttention(config, layer_id) # dropout requires to have the same # seed for forward and backward pass self.attention_seed = None self.feed_forward_seed = None self.feed_forward = ChunkReformerFeedForward(config) def _init_attention_seed(self): """ This function sets a new seed for the attention layer to make dropout deterministic for both forward calls: 1 normal forward call and 1 forward call in backward to recalculate activations. """ # randomize seeds # use cuda generator if available if hasattr(torch.cuda, "default_generators") and len(torch.cuda.default_generators) > 0: # GPU device_idx = torch.cuda.current_device() self.attention_seed = torch.cuda.default_generators[device_idx].seed() else: # CPU self.attention_seed = int(torch.seed() % sys.maxsize) torch.manual_seed(self.attention_seed) def _init_feed_forward_seed(self): """ This function sets a new seed for the feed forward layer to make dropout deterministic for both forward calls: 1 normal forward call and 1 forward call in backward to recalculate activations. """ # randomize seeds # use cuda generator if available if hasattr(torch.cuda, "default_generators") and len(torch.cuda.default_generators) > 0: # GPU device_idx = torch.cuda.current_device() self.feed_forward_seed = torch.cuda.default_generators[device_idx].seed() else: # CPU self.feed_forward_seed = int(torch.seed() % sys.maxsize) torch.manual_seed(self.feed_forward_seed) def forward( self, prev_attn_output, hidden_states, attention_mask=None, head_mask=None, num_hashes=None, past_buckets_states=None, use_cache=False, orig_sequence_length=None, output_attentions=False, ): with torch.no_grad(): # every forward pass we sample a different seed # for dropout and save for forward fn in backward pass # to have correct dropout if self.training: self._init_attention_seed() attn_outputs = self.attention( hidden_states=hidden_states, head_mask=head_mask, attention_mask=attention_mask, num_hashes=num_hashes, past_buckets_states=past_buckets_states, use_cache=use_cache, orig_sequence_length=orig_sequence_length, output_attentions=output_attentions, ) attn_output = attn_outputs.hidden_states # Implementation of RevNet (see Fig. 6 in https://towardsdatascience.com/illustrating-the-reformer-393575ac6ba0) # Y_1 = X_1 + f(X_2) attn_output = prev_attn_output + attn_output # free memory del prev_attn_output # every forward pass we sample a different seed # for dropout and save seed for forward fn in backward # to have correct dropout if self.training: self._init_feed_forward_seed() # Y_2 = X_2 + g(Y_1) hidden_states = hidden_states + self.feed_forward(attn_output) return ReformerOutput( attn_output=attn_output, hidden_states=hidden_states, attention_probs=attn_outputs.attention_probs, buckets=attn_outputs.buckets, ) def backward_pass( self, next_attn_output, hidden_states, grad_attn_output, grad_hidden_states, attention_mask=None, head_mask=None, buckets=None, ): # Implements the backward pass for reversible ResNets. # A good blog post on how this works can be found here: # Implementation of RevNet (see Fig. 6 in https://towardsdatascience.com/illustrating-the-reformer-393575ac6ba0) # This code is heavily inspired by https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reversible.py assert self.training, ( "If you want to train `ReformerModel` and its variations, make sure to use `model.train()` to put the" " model into training mode." 
) with torch.enable_grad(): next_attn_output.requires_grad = True # set seed to have correct dropout torch.manual_seed(self.feed_forward_seed) # g(Y_1) res_hidden_states = self.feed_forward(next_attn_output) res_hidden_states.backward(grad_hidden_states, retain_graph=True) with torch.no_grad(): # X_2 = Y_2 - g(Y_1) hidden_states = hidden_states - res_hidden_states del res_hidden_states grad_attn_output = grad_attn_output + next_attn_output.grad next_attn_output.grad = None with torch.enable_grad(): hidden_states.requires_grad = True # set seed to have correct dropout torch.manual_seed(self.attention_seed) # f(X_2) # use cached buckets for backprob if buckets not None for LSHSelfAttention output = self.attention( hidden_states=hidden_states, head_mask=head_mask, attention_mask=attention_mask, buckets=buckets, ).hidden_states output.backward(grad_attn_output, retain_graph=True) with torch.no_grad(): # X_1 = Y_1 - f(X_2) attn_output = next_attn_output - output del output, next_attn_output grad_hidden_states = grad_hidden_states + hidden_states.grad hidden_states.grad = None hidden_states = hidden_states.detach() return ReformerBackwardOutput( attn_output=attn_output, hidden_states=hidden_states, grad_attn_output=grad_attn_output, grad_hidden_states=grad_hidden_states, ) class _ReversibleFunction(Function): """ To prevent PyTorch from performing the usual backpropagation, a customized backward function is implemented here. This way it is made sure that no memory expensive activations are saved during the forward pass. This function is heavily inspired by https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reversible.py """ @staticmethod def forward( ctx, hidden_states, layers, attention_mask, head_mask, num_hashes, all_hidden_states, all_attentions, past_buckets_states, use_cache, orig_sequence_length, output_hidden_states, output_attentions, ): all_buckets = () # split duplicated tensor hidden_states, attn_output = torch.chunk(hidden_states, 2, dim=-1) for layer_id, (layer, layer_head_mask) in enumerate(zip(layers, head_mask)): if output_hidden_states is True: all_hidden_states.append(hidden_states) layer_outputs = layer( prev_attn_output=attn_output, hidden_states=hidden_states, attention_mask=attention_mask, head_mask=layer_head_mask, num_hashes=num_hashes, past_buckets_states=past_buckets_states, use_cache=use_cache, orig_sequence_length=orig_sequence_length, output_attentions=output_attentions, ) attn_output = layer_outputs.attn_output hidden_states = layer_outputs.hidden_states all_buckets = all_buckets + (layer_outputs.buckets,) if output_attentions: all_attentions.append(layer_outputs.attention_probs) # Add last layer if output_hidden_states is True: all_hidden_states.append(hidden_states) # attach params to ctx for backward ctx.save_for_backward(attn_output.detach(), hidden_states.detach()) ctx.layers = layers ctx.all_buckets = all_buckets ctx.head_mask = head_mask ctx.attention_mask = attention_mask # Concatenate 2 RevNet outputs return torch.cat([attn_output, hidden_states], dim=-1) @staticmethod def backward(ctx, grad_hidden_states): grad_attn_output, grad_hidden_states = torch.chunk(grad_hidden_states, 2, dim=-1) # retrieve params from ctx for backward attn_output, hidden_states = ctx.saved_tensors # create tuple output = ReformerBackwardOutput( attn_output=attn_output, hidden_states=hidden_states, grad_attn_output=grad_attn_output, grad_hidden_states=grad_hidden_states, ) # free memory del grad_attn_output, grad_hidden_states, attn_output, hidden_states 
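        # Walk the layers in reverse order: each `backward_pass` call pops the buckets cached for that
        # layer, recomputes the layer's inputs from its outputs (inverting the reversible residuals) and
        # accumulates the gradients, so no intermediate activations had to be stored during forward.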
layers = ctx.layers all_buckets = ctx.all_buckets head_mask = ctx.head_mask attention_mask = ctx.attention_mask for idx, layer in enumerate(layers[::-1]): # pop last buckets from stack buckets = all_buckets[-1] all_buckets = all_buckets[:-1] # backprop output = layer.backward_pass( next_attn_output=output.attn_output, hidden_states=output.hidden_states, grad_attn_output=output.grad_attn_output, grad_hidden_states=output.grad_hidden_states, head_mask=head_mask[len(layers) - idx - 1], attention_mask=attention_mask, buckets=buckets, ) assert all_buckets == (), "buckets have to be empty after backpropagation" grad_hidden_states = torch.cat([output.grad_attn_output, output.grad_hidden_states], dim=-1) # num of return vars has to match num of forward() args # return gradient for hidden_states arg and None for other args return grad_hidden_states, None, None, None, None, None, None, None, None, None, None, None class ReformerEncoder(nn.Module): def __init__(self, config): super().__init__() self.dropout = config.hidden_dropout_prob self.layers = nn.ModuleList([ReformerLayer(config, i) for i in range(config.num_hidden_layers)]) # Reformer is using Rev Nets, thus last layer outputs are concatenated and # Layer Norm is done over 2 * hidden_size self.layer_norm = nn.LayerNorm(2 * config.hidden_size, eps=config.layer_norm_eps) def forward( self, hidden_states, attention_mask=None, head_mask=None, num_hashes=None, past_buckets_states=None, use_cache=False, orig_sequence_length=None, output_hidden_states=False, output_attentions=False, ): # hidden_states and attention lists to be filled if wished all_hidden_states = [] all_attentions = [] # init cached hidden states if necessary if past_buckets_states is None: past_buckets_states = [((None), (None)) for i in range(len(self.layers))] # concat same tensor for reversible ResNet hidden_states = torch.cat([hidden_states, hidden_states], dim=-1) hidden_states = _ReversibleFunction.apply( hidden_states, self.layers, attention_mask, head_mask, num_hashes, all_hidden_states, all_attentions, past_buckets_states, use_cache, orig_sequence_length, output_hidden_states, output_attentions, ) # Apply layer norm to concatenated hidden states hidden_states = self.layer_norm(hidden_states) # Apply dropout hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) return ReformerEncoderOutput( hidden_states=hidden_states, all_hidden_states=all_hidden_states, all_attentions=all_attentions, past_buckets_states=past_buckets_states, ) class ReformerOnlyLMHead(nn.Module): def __init__(self, config): super().__init__() # Reformer is using Rev Nets, thus last layer outputs are concatenated and # Layer Norm is done over 2 * hidden_size self.seq_len_dim = 1 self.chunk_size_lm_head = config.chunk_size_lm_head self.decoder = nn.Linear(2 * config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def forward(self, hidden_states): return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states) def forward_chunk(self, hidden_states): hidden_states = self.decoder(hidden_states) return hidden_states def _tie_weights(self): # To tie those two weights if they get disconnected (on TPU or when the bias is resized) self.bias = self.decoder.bias class ReformerPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = ReformerConfig base_model_prefix = "reformer" @property def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { "input_ids": input_ids, "attention_mask": input_mask, } return dummy_inputs def _init_weights(self, module): """Initialize the weights""" if isinstance(module, AxialPositionEmbeddings): for weight in module.weights: nn.init.normal_(weight, std=self.config.axial_norm_std) elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @dataclass class ReformerModelOutput(ModelOutput): """ Output type of [`ReformerModel`]. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_predict, hidden_size)`): Sequence of hidden-states at the last layer of the model. `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`. past_buckets_states (`List[Tuple(torch.LongTensor, torch.FloatTensor)]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `Tuple(torch.LongTensor, torch.FloatTensor` of length `config.n_layers`, with the first element being the previous *buckets* of shape `(batch_size, num_heads, num_hashes, sequence_length)`) and the second being the previous *hidden_states* of shape `(batch_size, sequence_length, hidden_size)`). Contains precomputed buckets and hidden-states that can be used (see `past_buckets_states` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: torch.FloatTensor past_buckets_states: Optional[List[Tuple[torch.LongTensor, torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class ReformerModelWithLMHeadOutput(ModelOutput): """ Output type of [`ReformerModelWithLMHead`]. Args: loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided) Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, num_predict, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). `num_predict` corresponds to `target_mapping.shape[1]`. 
If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`. past_buckets_states (`List[Tuple(torch.LongTensor, torch.FloatTensor)]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `Tuple(torch.LongTensor, torch.FloatTensor` of length `config.n_layers`, with the first element being the previous *buckets* of shape `(batch_size, num_heads, num_hashes, sequence_length)`) and the second being the previous *hidden_states* of shape `(batch_size, sequence_length, hidden_size)`). Contains precomputed buckets and hidden-states that can be used (see `past_buckets_states` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): TTuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_buckets_states: Optional[List[Tuple[torch.LongTensor, torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None REFORMER_START_DOCSTRING = r""" Reformer was proposed in [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ReformerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ REFORMER_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to be a multiple of the relevant model's chunk lengths (lsh's, local's or both). During evaluation, the indices are automatically padded to be a multiple of the chunk length. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
            [What are attention masks?](../glossary#attention-mask)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        num_hashes (`int`, *optional*):
            The number of hashing rounds that should be performed during bucketing. Setting this argument overwrites
            the default defined in `config.num_hashes`.

            For more information, see `num_hashes` in [`ReformerConfig`].
        past_buckets_states (`List[Tuple(torch.LongTensor, torch.FloatTensor)]`, *optional*):
            List of `Tuple(torch.LongTensor, torch.FloatTensor)` of length `config.n_layers`, with the first element
            being the previous *buckets* of shape `(batch_size, num_heads, num_hashes, sequence_length)` and the
            second being the previous *hidden_states* of shape `(batch_size, sequence_length, hidden_size)`.

            Contains precomputed hidden-states and buckets (only relevant for LSH Self-Attention). Can be used to
            speed up sequential decoding.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare Reformer Model transformer outputting raw hidden-states without any specific head on top.",
    REFORMER_START_DOCSTRING,
)
class ReformerModel(ReformerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        assert (
            self.config.num_hidden_layers > 0
        ), "`config.attn_layers` is empty. Select at least one attn layer from ['lsh', 'local']"

        self.embeddings = ReformerEmbeddings(config)
        self.encoder = ReformerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model.
        heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base class `PreTrainedModel`.
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=ReformerModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        num_hashes: Optional[int] = None,
        past_buckets_states: Optional[List[Tuple[torch.Tensor]]] = None,
        use_cache: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, ReformerModelOutput]:
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = input_ids.size()  # noqa: F841
            device = input_ids.device
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]  # noqa: F841
            device = inputs_embeds.device
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        assert (
            len(input_shape) == 2
        ), f"`input_ids` have to be of shape `[batch_size, sequence_length]`, but got shape: {input_shape}"

        if past_buckets_states is not None:
            assert not self.training, "`past_buckets_states` can only be used for inference, not for training."

        # prepare head mask
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers, is_attention_chunked=True)

        # original sequence length for padding
        orig_sequence_length = input_shape[-1]

        # if needs padding
        least_common_mult_chunk_length = _get_least_common_mult_chunk_len(self.config)
        min_chunk_length = _get_min_chunk_len(self.config)

        must_pad_to_match_chunk_length = (
            input_shape[-1] % least_common_mult_chunk_length != 0
            and input_shape[-1] > min_chunk_length
            and past_buckets_states is None
        )

        if must_pad_to_match_chunk_length:
            padding_length = least_common_mult_chunk_length - input_shape[-1] % least_common_mult_chunk_length

            if self.training is True:
                raise ValueError(
                    f"If training, sequence length {input_shape[-1]} has to be a multiple of least common multiple "
                    f"chunk_length {least_common_mult_chunk_length}. Please consider padding the input to a length "
                    f"of {input_shape[-1] + padding_length}."
) # pad input input_ids, inputs_embeds, attention_mask, position_ids, input_shape = self._pad_to_mult_of_chunk_length( input_ids, inputs_embeds=inputs_embeds, attention_mask=attention_mask, position_ids=position_ids, input_shape=input_shape, padding_length=padding_length, padded_seq_length=least_common_mult_chunk_length, device=device, ) # start index for position encoding depends on incremental decoding if past_buckets_states is not None: start_idx_pos_encodings = past_buckets_states[0][1].shape[1] else: start_idx_pos_encodings = 0 embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, start_idx_pos_encodings=start_idx_pos_encodings, ) encoder_outputs = self.encoder( hidden_states=embedding_output, head_mask=head_mask, attention_mask=attention_mask, num_hashes=num_hashes, past_buckets_states=past_buckets_states, use_cache=use_cache, orig_sequence_length=orig_sequence_length, output_hidden_states=output_hidden_states, output_attentions=output_attentions, ) sequence_output = encoder_outputs.hidden_states # if padding was applied if must_pad_to_match_chunk_length: sequence_output = sequence_output[:, :orig_sequence_length] past_buckets_states = encoder_outputs.past_buckets_states if use_cache else None hidden_states = encoder_outputs.all_hidden_states if output_hidden_states else None attentions = encoder_outputs.all_attentions if output_attentions else None if not return_dict: return tuple(v for v in [sequence_output, past_buckets_states, hidden_states, attentions] if v is not None) return ReformerModelOutput( last_hidden_state=sequence_output, past_buckets_states=past_buckets_states, hidden_states=hidden_states, attentions=attentions, ) def _pad_to_mult_of_chunk_length( self, input_ids, inputs_embeds=None, attention_mask=None, position_ids=None, input_shape=None, padding_length=None, padded_seq_length=None, device=None, ): logger.info( f"Input ids are automatically padded from {input_shape[-1]} to {input_shape[-1] + padding_length} to be a " f"multiple of `config.chunk_length`: {padded_seq_length}" ) padded_input_ids = torch.full( (input_shape[0], padding_length), self.config.pad_token_id, device=device, dtype=torch.long, ) # Extend `attention_mask` if attention_mask is not None: pad_attention_mask = torch.zeros(input_shape[0], padding_length, device=device, dtype=attention_mask.dtype) attention_mask = torch.cat([attention_mask, pad_attention_mask], dim=-1) else: attention_mask = torch.cat( [ torch.ones(input_shape, device=device, dtype=torch.bool), torch.zeros((input_shape[0], padding_length), device=device, dtype=torch.bool), ], dim=-1, ) # Extend `input_ids` with padding to match least common multiple chunk_length if input_ids is not None: input_ids = torch.cat([input_ids, padded_input_ids], dim=-1) input_shape = input_ids.size() # Pad position ids if given if position_ids is not None: padded_position_ids = torch.arange(input_shape[-1], padded_seq_length, dtype=torch.long, device=device) padded_position_ids = position_ids.unsqueeze(0).expand(input_shape[0], padding_length) position_ids = torch.cat([position_ids, padded_position_ids], dim=-1) # Extend `inputs_embeds` with padding to match least common multiple chunk_length if inputs_embeds is not None: padded_inputs_embeds = self.embeddings(padded_input_ids, position_ids) inputs_embeds = torch.cat([inputs_embeds, padded_inputs_embeds], dim=-2) input_shape = inputs_embeds.size() return input_ids, inputs_embeds, attention_mask, position_ids, input_shape 
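

# Illustrative sketch only (not part of the modeling code): it shows how the padding implemented in
# `_pad_to_mult_of_chunk_length` surfaces to users. At inference time, an input whose length is not a multiple of the
# least common chunk length is padded internally and the padded positions are cut away again before returning, so the
# output keeps the original sequence length. The checkpoint name and example sentence are assumptions chosen purely
# for demonstration purposes.
def _example_automatic_chunk_padding():  # hypothetical helper, never called by the library
    import torch
    from transformers import ReformerModel, ReformerTokenizer

    tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
    model = ReformerModel.from_pretrained("google/reformer-crime-and-punishment")
    model.eval()

    # a short sentence is (almost certainly) not a multiple of the chunk length, so internal padding is triggered
    inputs = tokenizer("The quick brown fox jumps over the lazy dog.", return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)

    # the hidden states are truncated back to the original (un-padded) sequence length
    assert outputs.last_hidden_state.shape[1] == inputs["input_ids"].shape[1]
    return outputs.last_hidden_state
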
@add_start_docstrings("""Reformer Model with a `language modeling` head on top.""", REFORMER_START_DOCSTRING)
class ReformerModelWithLMHead(ReformerPreTrainedModel):
    _keys_to_ignore_on_load_missing = ["lm_head.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)
        assert config.is_decoder, "If you want to use `ReformerModelWithLMHead` make sure that `is_decoder=True`."
        assert "local" not in self.config.attn_layers or config.local_num_chunks_after == 0, (
            "If causal mask is enabled, make sure that `config.local_num_chunks_after` is set to 0 and not"
            f" {config.local_num_chunks_after}."
        )
        assert "lsh" not in self.config.attn_layers or config.lsh_num_chunks_after == 0, (
            "If causal mask is enabled, make sure that `config.lsh_num_chunks_after` is set to 0 and not"
            f" {config.lsh_num_chunks_after}."
        )

        self.reformer = ReformerModel(config)
        self.lm_head = ReformerOnlyLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.lm_head.decoder

    def set_output_embeddings(self, new_embeddings):
        self.lm_head.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=CausalLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        num_hashes: Optional[int] = None,
        past_buckets_states: Optional[List[Tuple[torch.Tensor]]] = None,
        use_cache: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: Optional[torch.Tensor] = None,
    ) -> Union[Tuple, CausalLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the language modeling loss (the labels are shifted inside the model). Indices should
            be in `[-100, 0, ..., config.vocab_size - 1]`.
All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict reformer_outputs = self.reformer( input_ids, position_ids=position_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, num_hashes=num_hashes, past_buckets_states=past_buckets_states, use_cache=use_cache, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) sequence_output = reformer_outputs[0] logits = self.lm_head(sequence_output) loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, self.config.vocab_size), shift_labels.view(-1)) if not return_dict: output = (logits,) + reformer_outputs[1:] return ((loss,) + output) if loss is not None else output return ReformerModelWithLMHeadOutput( loss=loss, logits=logits, past_buckets_states=reformer_outputs.past_buckets_states, hidden_states=reformer_outputs.hidden_states, attentions=reformer_outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, use_cache=None, num_hashes=None, **kwargs ): # only last token for inputs_ids if past is defined in kwargs if past_key_values is not None: input_ids = input_ids[:, -1:] inputs_dict = { "input_ids": input_ids, "past_buckets_states": past_key_values, "use_cache": use_cache, "num_hashes": num_hashes, } return inputs_dict def _reorder_cache(self, past_key_values, beam_idx): reord_past_buckets_states = [] for layer_past in past_key_values: # buckets if layer_past[0] is not None: reord_buckets = layer_past[0].index_select(0, beam_idx) else: reord_buckets = None # hidden states reord_hidden_states = layer_past[1].index_select(0, beam_idx) reord_past_buckets_states.append((reord_buckets, reord_hidden_states)) return reord_past_buckets_states @add_start_docstrings("""Reformer Model with a `language modeling` head on top.""", REFORMER_START_DOCSTRING) class ReformerForMaskedLM(ReformerPreTrainedModel): def __init__(self, config): super().__init__(config) assert not config.is_decoder, ( "If you want to use `ReformerForMaskedLM` make sure `config.is_decoder=False` for bi-directional" " self-attention." ) self.reformer = ReformerModel(config) self.lm_head = ReformerOnlyLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, num_hashes: Optional[int] = None, labels: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels Returns: <Tip warning={true}> This example uses a false checkpoint since we don't have any available pretrained model for the masked language modeling task with the Reformer architecture. </Tip> Example: ```python >>> import torch >>> from transformers import AutoTokenizer, ReformerForMaskedLM >>> tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-reformer") >>> model = ReformerForMaskedLM.from_pretrained("hf-internal-testing/tiny-random-reformer") >>> # add mask_token >>> tokenizer.add_special_tokens({"mask_token": "[MASK]"}) # doctest: +IGNORE_RESULT >>> inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt") >>> # resize model's embedding matrix >>> model.resize_token_embeddings(new_num_tokens=model.config.vocab_size + 1) # doctest: +IGNORE_RESULT >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> # retrieve index of [MASK] >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0] >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1) >>> predicted_token = tokenizer.decode(predicted_token_id) ``` ```python >>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] >>> # mask labels of non-[MASK] tokens >>> labels = torch.where( ... inputs.input_ids == tokenizer.mask_token_id, labels[:, : inputs["input_ids"].shape[-1]], -100 ... ) >>> outputs = model(**inputs, labels=labels) >>> loss = round(outputs.loss.item(), 2) ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict reformer_outputs = self.reformer( input_ids, position_ids=position_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, num_hashes=num_hashes, use_cache=False, # no causal mask output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) sequence_output = reformer_outputs[0] logits = self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + reformer_outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=logits, hidden_states=reformer_outputs.hidden_states, attentions=reformer_outputs.attentions, ) @add_start_docstrings( """ Reformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", REFORMER_START_DOCSTRING, ) class ReformerForSequenceClassification(ReformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.reformer = ReformerModel(config) self.classifier = ReformerClassificationHead(config) if config.is_decoder is True: logger.warning("You might want to disable causal masking for sequence classification") # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, num_hashes: Optional[int] = None, labels: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: Example of single-label classification: ```python >>> import torch >>> from transformers import AutoTokenizer, ReformerForSequenceClassification >>> tokenizer = AutoTokenizer.from_pretrained("google/reformer-crime-and-punishment") >>> model = ReformerForSequenceClassification.from_pretrained("google/reformer-crime-and-punishment") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_class_id = logits.argmax().item() >>> label = model.config.id2label[predicted_class_id] ``` ```python >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)` >>> num_labels = len(model.config.id2label) >>> model = ReformerForSequenceClassification.from_pretrained( ... "google/reformer-crime-and-punishment", num_labels=num_labels ... 
) >>> labels = torch.tensor(1) >>> loss = model(**inputs, labels=labels).loss ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.reformer( input_ids, position_ids=position_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, num_hashes=num_hashes, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class ReformerClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(2 * config.hidden_size, config.hidden_size) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, hidden_states, **kwargs): hidden_states = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS]) hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states @add_start_docstrings( """ Reformer Model with a span classification head on top for extractive question-answering tasks like SQuAD / TriviaQA ( a linear layer on top of hidden-states output to compute `span start logits` and `span end logits`. 
""", REFORMER_START_DOCSTRING, ) class ReformerForQuestionAnswering(ReformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.reformer = ReformerModel(config) # 2 * config.hidden_size because we use reversible residual layers self.qa_outputs = nn.Linear(2 * config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, num_hashes: Optional[int] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict reformer_outputs = self.reformer( input_ids, position_ids=position_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, num_hashes=num_hashes, use_cache=False, # no causal mask output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) sequence_output = reformer_outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + reformer_outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=reformer_outputs.hidden_states, attentions=reformer_outputs.attentions, )
# coding=utf-8 # Copyright 2020 The Trax Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch REFORMER model.""" import sys from collections import namedtuple from dataclasses import dataclass from functools import reduce from operator import mul from typing import List, Optional, Tuple, Union import numpy as np import torch from torch import nn from torch.autograd.function import Function from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import CausalLMOutput, MaskedLMOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward from ...utils import ( DUMMY_INPUTS, DUMMY_MASK, ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_reformer import ReformerConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "google/reformer-crime-and-punishment" _CONFIG_FOR_DOC = "ReformerConfig" REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "google/reformer-crime-and-punishment", "google/reformer-enwik8", # See all Reformer models at https://huggingface.co/models?filter=reformer ] # Define named tuples for nn.Modules here LSHSelfAttentionOutput = namedtuple("LSHSelfAttentionOutput", ["hidden_states", "attention_probs", "buckets"]) LocalSelfAttentionOutput = namedtuple("LocalSelfAttentionOutput", ["hidden_states", "attention_probs"]) AttentionOutput = namedtuple("AttentionOutput", ["hidden_states", "attention_probs", "buckets"]) ReformerOutput = namedtuple("ReformerOutput", ["hidden_states", "attn_output", "attention_probs", "buckets"]) ReformerBackwardOutput = namedtuple( "ReformerBackwardOutput", ["attn_output", "hidden_states", "grad_attn_output", "grad_hidden_states"] ) ReformerEncoderOutput = namedtuple( "ReformerEncoderOutput", ["hidden_states", "all_hidden_states", "all_attentions", "past_buckets_states"], ) def _stable_argsort(vector, dim): # this function scales the vector so that torch.argsort is stable. 
# torch.argsort is not stable on its own scale_offset = torch.arange(vector.shape[dim], device=vector.device).view(1, 1, -1) scale_offset = scale_offset.expand(vector.shape) scaled_vector = vector.shape[dim] * vector + (scale_offset % vector.shape[dim]) return torch.argsort(scaled_vector, dim=dim) def _get_least_common_mult_chunk_len(config): attn_types = config.attn_layers attn_types_set = set(attn_types) if len(attn_types_set) == 1 and attn_types[0] == "lsh": return config.lsh_attn_chunk_length elif len(attn_types_set) == 1 and attn_types[0] == "local": return config.local_attn_chunk_length elif len(attn_types_set) == 2 and attn_types_set == {"lsh", "local"}: return np.lcm(config.lsh_attn_chunk_length, config.local_attn_chunk_length) else: raise NotImplementedError( f"Only attn layer types 'lsh' and 'local' exist, but `config.attn_layers`: {config.attn_layers}. Select " "attn layer types from ['lsh', 'local'] only." ) def _get_min_chunk_len(config): attn_types = config.attn_layers attn_types_set = set(attn_types) if len(attn_types_set) == 1 and attn_types[0] == "lsh": return config.lsh_attn_chunk_length elif len(attn_types_set) == 1 and attn_types[0] == "local": return config.local_attn_chunk_length elif len(attn_types_set) == 2 and attn_types_set == {"lsh", "local"}: return min(config.lsh_attn_chunk_length, config.local_attn_chunk_length) else: raise NotImplementedError( f"Only attn layer types 'lsh' and 'local' exist, but `config.attn_layers`: {config.attn_layers}. Select " "attn layer types from ['lsh', 'local'] only." ) class AxialPositionEmbeddings(nn.Module): """ Constructs axial position embeddings. Useful for very long input sequences to save memory and time. """ def __init__(self, config): super().__init__() self.axial_pos_shape = config.axial_pos_shape self.axial_pos_embds_dim = config.axial_pos_embds_dim self.dropout = config.hidden_dropout_prob self.least_common_mult_chunk_length = _get_least_common_mult_chunk_len(config) self.weights = nn.ParameterList() if sum(self.axial_pos_embds_dim) != config.hidden_size: raise ValueError( f"Make sure that config.axial_pos_embds factors: {self.axial_pos_embds_dim} sum to " f"config.hidden_size: {config.hidden_size}" ) # create weights for axis, axial_pos_embd_dim in enumerate(self.axial_pos_embds_dim): # create expanded shapes ax_shape = [1] * len(self.axial_pos_shape) ax_shape[axis] = self.axial_pos_shape[axis] ax_shape = tuple(ax_shape) + (axial_pos_embd_dim,) # create tensor and init self.weights.append(nn.Parameter(torch.ones(ax_shape, dtype=torch.float32))) def forward(self, position_ids): # broadcast weights to correct shape batch_size = position_ids.shape[0] sequence_length = position_ids.shape[1] broadcasted_weights = [ weight.expand((batch_size,) + self.axial_pos_shape + weight.shape[-1:]) for weight in self.weights ] if self.training is True: if reduce(mul, self.axial_pos_shape) != sequence_length: raise ValueError( f"If training, make sure that config.axial_pos_shape factors: {self.axial_pos_shape} multiply to " f"sequence length. Got prod({self.axial_pos_shape}) != sequence_length: {sequence_length}. " f"You might want to consider padding your sequence length to {reduce(mul, self.axial_pos_shape)} " "or changing config.axial_pos_shape." 
) if self.dropout > 0: weights = torch.cat(broadcasted_weights, dim=-1) # permute weights so that 2D correctly drops dims 1 and 2 transposed_weights = weights.transpose(2, 1) # drop entire matrix of last two dims (prev dims 1 and 2) dropped_transposed_weights = nn.functional.dropout2d( transposed_weights, p=self.dropout, training=self.training ) dropped_weights = dropped_transposed_weights.transpose(2, 1) position_encodings = torch.reshape(dropped_weights, (batch_size, sequence_length, -1)) else: position_encodings = torch.cat( [torch.reshape(weight, (batch_size, sequence_length, -1)) for weight in broadcasted_weights], dim=-1, ) else: if reduce(mul, self.axial_pos_shape) < sequence_length: raise ValueError( f"Make sure that config.axial_pos_shape factors: {self.axial_pos_shape} multiply at least to " f"max(sequence_length, least_common_mult_chunk_length): max({sequence_length}, " f"{self.least_common_mult_chunk_length})." ) # compute how many columns are needed max_position_id = position_ids.max().item() required_pos_encodings_columns = -(-(max_position_id + 1) // self.axial_pos_shape[1]) # cut to columns that are needed position_encodings = torch.cat( [weight[:, :required_pos_encodings_columns] for weight in broadcasted_weights], dim=-1 ) position_encodings = torch.reshape(position_encodings, (batch_size, -1, position_encodings.shape[-1])) # select correct position encodings position_encodings = torch.cat( [ torch.index_select(position_encodings[i], 0, position_ids[i]).unsqueeze(0) for i in range(batch_size) ], dim=0, ) return position_encodings class PositionEmbeddings(nn.Module): """Constructs conventional position embeddings of shape `[max_pos_embeddings, hidden_size]`.""" def __init__(self, config): super().__init__() self.dropout = config.hidden_dropout_prob self.embedding = nn.Embedding(config.max_position_embeddings, config.hidden_size) def forward(self, position_ids): position_embeddings = self.embedding(position_ids) position_embeddings = nn.functional.dropout(position_embeddings, p=self.dropout, training=self.training) return position_embeddings class ReformerEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.max_position_embeddings = config.max_position_embeddings self.dropout = config.hidden_dropout_prob self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size) self.position_embeddings = ( AxialPositionEmbeddings(config) if config.axial_pos_embds else PositionEmbeddings(config) ) def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, start_idx_pos_encodings=0): if input_ids is not None: input_shape = input_ids.size() device = input_ids.device else: input_shape = inputs_embeds.size()[:-1] device = inputs_embeds.device seq_length = input_shape[1] if position_ids is None: position_ids = torch.arange( start_idx_pos_encodings, start_idx_pos_encodings + seq_length, dtype=torch.long, device=device ) position_ids = position_ids.unsqueeze(0).expand(input_shape) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if position_ids.shape[-1] > self.max_position_embeddings: raise ValueError( f"Sequence Length: {position_ids.shape[-1]} has to be less or equal than " f"config.max_position_embeddings {self.max_position_embeddings}." 
) # dropout embeddings = nn.functional.dropout(inputs_embeds, p=self.dropout, training=self.training) # add positional embeddings position_embeddings = self.position_embeddings(position_ids) embeddings = embeddings + position_embeddings return embeddings class EfficientAttentionMixin: """ A few utilities for nn.Modules in Reformer, to be used as a mixin. """ def _look_adjacent(self, vectors, num_chunks_before, num_chunks_after): """ Used to implement attention between consecutive chunks. Args: vectors: array of shape [batch_size, num_attention_heads, n_chunks, chunk_len, ...] num_chunks_before: chunks before current chunk to include in attention num_chunks_after: chunks after current chunk to include in attention Returns: tensor of shape [num_chunks, N * chunk_length, ...], where N = (1 + num_chunks_before + num_chunks_after). """ if num_chunks_before == 0 and num_chunks_after == 0: return vectors slices = [] for i in range(-num_chunks_before, num_chunks_after + 1): if i == 0: slices.append(vectors) else: slices.append(torch.cat([vectors[:, :, i:, ...], vectors[:, :, :i, ...]], dim=2)) return torch.cat(slices, dim=3) def _split_hidden_size_dim(self, x, num_attn_heads, attn_head_size): """ splits hidden_size dim into attn_head_size and num_attn_heads """ new_x_shape = x.size()[:-1] + (num_attn_heads, attn_head_size) x = x.view(*new_x_shape) return x.transpose(2, 1) def _merge_hidden_size_dims(self, x, num_attn_heads, attn_head_size): """ merges attn_head_size dim and num_attn_heads dim into hidden_size """ x = x.permute(0, 2, 1, 3) return torch.reshape(x, (x.size()[0], -1, num_attn_heads * attn_head_size)) def _split_seq_length_dim_to(self, vectors, dim_factor_1, dim_factor_2, num_attn_heads, attn_head_size=None): """ splits sequence length dim of vectors into `dim_factor_1` and `dim_factor_2` dims """ batch_size = vectors.shape[0] split_dim_shape = (batch_size, num_attn_heads, dim_factor_1, dim_factor_2) if len(vectors.shape) == 4: return torch.reshape(vectors, split_dim_shape + (attn_head_size,)) elif len(vectors.shape) == 3: return torch.reshape(vectors, split_dim_shape) else: raise ValueError(f"Input vector rank should be one of [3, 4], but is: {len(vectors.shape)}") class LSHSelfAttention(nn.Module, EfficientAttentionMixin): def __init__(self, config): super().__init__() self.config = config self.chunk_length = config.lsh_attn_chunk_length self.num_hashes = config.num_hashes self.num_buckets = config.num_buckets self.num_chunks_before = config.lsh_num_chunks_before self.num_chunks_after = config.lsh_num_chunks_after self.hash_seed = config.hash_seed self.is_decoder = config.is_decoder self.max_position_embeddings = config.max_position_embeddings self.dropout = config.lsh_attention_probs_dropout_prob self.num_attention_heads = config.num_attention_heads self.attention_head_size = config.attention_head_size self.all_head_size = self.num_attention_heads * self.attention_head_size self.hidden_size = config.hidden_size # projection matrices self.query_key = nn.Linear(self.hidden_size, self.all_head_size, bias=False) self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=False) # save mask value here. 
Need fp32 and fp16 mask values self.register_buffer("self_mask_value_float16", torch.tensor(-1e3)) self.register_buffer("self_mask_value_float32", torch.tensor(-1e5)) self.register_buffer("mask_value_float16", torch.tensor(-1e4)) self.register_buffer("mask_value_float32", torch.tensor(-1e9)) def forward( self, hidden_states, attention_mask=None, head_mask=None, num_hashes=None, buckets=None, past_buckets_states=None, use_cache=False, output_attentions=False, **kwargs, ): sequence_length = hidden_states.shape[1] batch_size = hidden_states.shape[0] # num hashes can optionally be overwritten by user num_hashes = num_hashes if num_hashes is not None else self.num_hashes do_cached_attention = use_cache and past_buckets_states[1] is not None # check if cache shall be used and that hidden states are already cached if do_cached_attention: assert sequence_length == 1, ( "At the moment, auto-regressive language generation is only possible one word at a time. Make sure" f" that input sequence length {sequence_length} equals 1, when `past_buckets_states` is passed." ) past_buckets = past_buckets_states[0] past_states = past_buckets_states[1] # get query vector query_vectors = self.query_key(hidden_states) query_vectors = self._split_hidden_size_dim( query_vectors, self.num_attention_heads, self.attention_head_size ) if past_buckets is not None: key_value_hidden_states, sorted_bucket_idx, buckets = self._get_relevant_hid_states_and_buckets( query_vectors=query_vectors, attention_mask=attention_mask, num_hashes=num_hashes, hidden_states=hidden_states, past_states=past_states, past_buckets=past_buckets, ) query_key_vectors = self._query_per_attn_head(key_value_hidden_states) value_vectors = self._value_per_attn_head(key_value_hidden_states) # split key & value vectors by num hashes to apply # self attention on each separately query_key_vectors = self._split_seq_length_dim_to( query_key_vectors, num_hashes, -1, self.num_attention_heads, self.attention_head_size, ) value_vectors = self._split_seq_length_dim_to( value_vectors, num_hashes, -1, self.num_attention_heads, self.attention_head_size, ) # repeat query vectors across hash dimension query_vectors = query_vectors.unsqueeze(2).repeat(1, 1, num_hashes, 1, 1) else: key_value_hidden_states = torch.cat([past_states, hidden_states], dim=1) query_key_vectors = self.query_key(key_value_hidden_states) value_vectors = self.value(key_value_hidden_states) else: # project hidden_states to query_key and value query_vectors = None query_key_vectors = self.query_key(hidden_states) value_vectors = self.value(hidden_states) # if query key is not already split if not do_cached_attention or past_buckets is None: query_key_vectors = self._split_hidden_size_dim( query_key_vectors, self.num_attention_heads, self.attention_head_size ) value_vectors = self._split_hidden_size_dim( value_vectors, self.num_attention_heads, self.attention_head_size ) # cache buckets for next incremental decoding if do_cached_attention and past_buckets is None and key_value_hidden_states.shape[1] >= self.chunk_length: buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask) # free memory del hidden_states assert ( query_key_vectors.shape[-1] == self.attention_head_size ), f"last dim of query_key_vectors is {query_key_vectors.shape[-1]} but should be {self.attention_head_size}." assert ( value_vectors.shape[-1] == self.attention_head_size ), f"last dim of value_vectors is {value_vectors.shape[-1]} but should be {self.attention_head_size}." 
do_standard_self_attention = (sequence_length <= self.chunk_length) or ( use_cache and past_buckets_states[1] is not None ) # LSH attention only makes sense if chunked attention should be performed if not do_standard_self_attention: # set `num_buckets` on the fly, recommended way to do it if self.num_buckets is None: self._set_num_buckets(sequence_length) # use cached buckets for backprop only if buckets is None: # hash query key vectors into buckets buckets = self._hash_vectors(query_key_vectors, num_hashes, attention_mask) else: # make sure buckets has correct shape for LSH attention buckets = buckets.view(batch_size, self.num_attention_heads, num_hashes * sequence_length) assert ( int(buckets.shape[-1]) == num_hashes * sequence_length ), f"last dim of buckets is {buckets.shape[-1]}, but should be {num_hashes * sequence_length}" sorted_bucket_idx, undo_sorted_bucket_idx = self._get_sorted_bucket_idx_and_undo_sorted_bucket_idx( sequence_length, buckets, num_hashes ) # make sure bucket idx is not longer then sequence length sorted_bucket_idx_per_hash = sorted_bucket_idx % sequence_length # cluster query key value vectors according to hashed buckets query_key_vectors = self._gather_by_expansion(query_key_vectors, sorted_bucket_idx_per_hash, num_hashes) value_vectors = self._gather_by_expansion(value_vectors, sorted_bucket_idx_per_hash, num_hashes) query_key_vectors = self._split_seq_length_dim_to( query_key_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size, ) value_vectors = self._split_seq_length_dim_to( value_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size, ) if self.chunk_length is None: assert self.num_chunks_before == 0 and self.num_chunks_after == 0, ( "If `config.chunk_length` is `None`, make sure `config.num_chunks_after` and" " `config.num_chunks_before` are set to 0." 
) elif do_cached_attention and past_buckets is not None: # use max sequence length sorted_bucket_idx_per_hash = sorted_bucket_idx else: # get sequence length indices sorted_bucket_idx_per_hash = torch.arange(sequence_length, device=query_key_vectors.device).repeat( batch_size, self.num_attention_heads, 1 ) # scale key vectors sqrt_num = np.sqrt(self.attention_head_size) key_vectors = self._len_and_dim_norm(query_key_vectors, sqrt_num) # set query_vectors to query key vectors if LSH self attention query_vectors = query_vectors if query_vectors is not None else query_key_vectors # free memory del query_key_vectors # get attention probs out_vectors, logits, attention_probs = self._attend( query_vectors=query_vectors, key_vectors=key_vectors, value_vectors=value_vectors, sorted_bucket_idx_per_hash=sorted_bucket_idx_per_hash, attention_mask=attention_mask, head_mask=head_mask, do_standard_self_attention=do_standard_self_attention, do_cached_attention=do_cached_attention, ) # free memory del key_vectors, value_vectors # re-order out_vectors and logits if not do_standard_self_attention: # sort clusters back to correct ordering out_vectors, logits = ReverseSort.apply(out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx) if not do_standard_self_attention or (do_cached_attention and past_buckets is not None): # sum up all hash rounds if num_hashes > 1: out_vectors = self._split_seq_length_dim_to( out_vectors, num_hashes, sequence_length, self.num_attention_heads, self.attention_head_size, ) logits = self._split_seq_length_dim_to( logits, num_hashes, sequence_length, self.num_attention_heads, self.attention_head_size, ).unsqueeze(-1) probs_vectors = torch.exp(logits - torch.logsumexp(logits, dim=2, keepdim=True)) out_vectors = torch.sum(out_vectors * probs_vectors, dim=2) # free memory del probs_vectors # free memory del logits assert out_vectors.shape == ( batch_size, self.num_attention_heads, sequence_length, self.attention_head_size, ), ( "out_vectors have be of shape `[batch_size, config.num_attention_heads, sequence_length," " config.attention_head_size]`." ) out_vectors = self._merge_hidden_size_dims(out_vectors, self.num_attention_heads, self.attention_head_size) if output_attentions is False: attention_probs = () if buckets is not None: buckets = buckets.view(batch_size, self.num_attention_heads, num_hashes, -1) return LSHSelfAttentionOutput(hidden_states=out_vectors, attention_probs=attention_probs, buckets=buckets) def _query_per_attn_head(self, hidden_states): per_head_query_key = self.query_key.weight.reshape( self.num_attention_heads, self.attention_head_size, self.hidden_size ).transpose(-2, -1) # only relevant for inference and no bias => we can use einsum here query_key_vectors = torch.einsum("balh,ahr->balr", hidden_states, per_head_query_key) return query_key_vectors def _value_per_attn_head(self, hidden_states): per_head_value = self.value.weight.reshape( self.num_attention_heads, self.attention_head_size, self.hidden_size ).transpose(-2, -1) # only relevant for inference and no bias => we can use einsum here value_vectors = torch.einsum("balh,ahr->balr", hidden_states, per_head_value) return value_vectors def _hash_vectors(self, vectors, num_hashes, attention_mask, increase_num_buckets=False): batch_size = vectors.shape[0] # See https://arxiv.org/pdf/1509.02897.pdf # We sample a different random rotation for each round of hashing to # decrease the probability of hash misses. 
if isinstance(self.num_buckets, int): assert ( self.num_buckets % 2 == 0 ), f"There should be an even number of buckets, but `self.num_buckets`: {self.num_buckets}" rotation_size = self.num_buckets num_buckets = self.num_buckets else: # Factorize the hash if self.num_buckets is a list or tuple rotation_size, num_buckets = 0, 1 for bucket_factor in self.num_buckets: assert ( bucket_factor % 2 == 0 ), f"The number of buckets should be even, but `num_bucket`: {bucket_factor}" rotation_size = rotation_size + bucket_factor num_buckets = num_buckets * bucket_factor # remove gradient vectors = vectors.detach() if self.hash_seed is not None: # for determinism torch.manual_seed(self.hash_seed) rotations_shape = (self.num_attention_heads, vectors.shape[-1], num_hashes, rotation_size // 2) # create a random self.attention_head_size x num_hashes x num_buckets/2 random_rotations = torch.randn(rotations_shape, device=vectors.device, dtype=vectors.dtype) # Output dim: Batch_Size x Num_Attn_Heads x Num_Hashes x Seq_Len x Num_Buckets/2 rotated_vectors = torch.einsum("bmtd,mdhr->bmhtr", vectors, random_rotations) if isinstance(self.num_buckets, int) or len(self.num_buckets) == 1: rotated_vectors = torch.cat([rotated_vectors, -rotated_vectors], dim=-1) buckets = torch.argmax(rotated_vectors, dim=-1) else: # Get the buckets for them and combine. buckets, cur_sum, cur_product = None, 0, 1 for bucket_factor in self.num_buckets: rotated_vectors_factor = rotated_vectors[..., cur_sum : cur_sum + (bucket_factor // 2)] cur_sum = cur_sum + bucket_factor // 2 rotated_vectors_factor = torch.cat([rotated_vectors_factor, -rotated_vectors_factor], dim=-1) if buckets is None: buckets = torch.argmax(rotated_vectors_factor, dim=-1) else: buckets = buckets + (cur_product * torch.argmax(rotated_vectors_factor, dim=-1)) cur_product = cur_product * bucket_factor if attention_mask is not None and (attention_mask.sum().item() < batch_size * attention_mask.shape[-1]): # add an extra bucket for padding tokens only num_buckets = num_buckets + 1 # assign padding tokens extra bucket buckets_mask = attention_mask.to(torch.bool)[:, None, None, :].expand(buckets.shape) buckets = torch.where( buckets_mask, buckets, torch.tensor(num_buckets - 1, dtype=torch.long, device=buckets.device) ) elif increase_num_buckets: num_buckets = num_buckets + 1 # buckets is now (Batch_size x Num_Attn_Heads x Num_Hashes x Seq_Len). # Next we add offsets so that bucket numbers from different hashing rounds don't overlap. 
offsets = torch.arange(num_hashes, device=vectors.device) offsets = (offsets * num_buckets).view((1, 1, -1, 1)) # expand to batch size and num attention heads offsets = offsets.expand((batch_size, self.num_attention_heads) + offsets.shape[-2:]) offset_buckets = (buckets + offsets).flatten(start_dim=2, end_dim=3) return offset_buckets def _get_sorted_bucket_idx_and_undo_sorted_bucket_idx(self, sequence_length, buckets, num_hashes): # no gradients are needed with torch.no_grad(): # hash-based sort sorted_bucket_idx = _stable_argsort(buckets, dim=-1) # create simple indices to scatter to, to have undo sort indices = ( torch.arange(sorted_bucket_idx.shape[-1], device=buckets.device) .view(1, 1, -1) .expand(sorted_bucket_idx.shape) ) # get undo sort undo_sorted_bucket_idx = sorted_bucket_idx.new(*sorted_bucket_idx.size()) undo_sorted_bucket_idx.scatter_(-1, sorted_bucket_idx, indices) return sorted_bucket_idx, undo_sorted_bucket_idx def _set_num_buckets(self, sequence_length): # `num_buckets` should be set to 2 * sequence_length // chunk_length as recommended in paper num_buckets_pow_2 = (2 * (sequence_length // self.chunk_length)).bit_length() - 1 # make sure buckets are power of 2 num_buckets = 2**num_buckets_pow_2 # factorize `num_buckets` if `num_buckets` becomes too large num_buckets_limit = 2 * max( int((self.max_position_embeddings // self.chunk_length) ** (0.5)), self.chunk_length, ) if num_buckets > num_buckets_limit: num_buckets = [2 ** (num_buckets_pow_2 // 2), 2 ** (num_buckets_pow_2 - num_buckets_pow_2 // 2)] logger.warning(f"config.num_buckets is not set. Setting config.num_buckets to {num_buckets}...") # set num buckets in config to be properly saved self.config.num_buckets = num_buckets self.num_buckets = num_buckets def _attend( self, query_vectors, key_vectors, value_vectors, sorted_bucket_idx_per_hash, attention_mask, head_mask, do_standard_self_attention, do_cached_attention, ): # look at previous and following chunks if chunked attention if not do_standard_self_attention: key_vectors = self._look_adjacent(key_vectors, self.num_chunks_before, self.num_chunks_after) value_vectors = self._look_adjacent(value_vectors, self.num_chunks_before, self.num_chunks_after) # get logits and dots # (BS, NumAttn, NumHash x NumChunk, Chunk_L x Hidden),(BS, NumAttn, NumHash x NumChunk, Chunk_L * (Num_bef + Num_aft + 1) x Hidden) -> (BS, NumAttn, NumHash x NumChunk, Chunk_L, Chunk_L * (1 + Num_bef + Num_aft)) query_key_dots = torch.matmul(query_vectors, key_vectors.transpose(-1, -2)) # free memory del query_vectors, key_vectors # if chunked attention split bucket idxs to query and key if not do_standard_self_attention: query_bucket_idx = self._split_seq_length_dim_to( sorted_bucket_idx_per_hash, -1, self.chunk_length, self.num_attention_heads ) key_value_bucket_idx = self._look_adjacent(query_bucket_idx, self.num_chunks_before, self.num_chunks_after) elif do_cached_attention and query_key_dots.ndim > 4: key_value_bucket_idx = sorted_bucket_idx_per_hash query_bucket_idx = ( key_value_bucket_idx.new_ones(key_value_bucket_idx.shape[:-1] + (1,)) * key_value_bucket_idx.max() ) elif do_cached_attention and query_key_dots.ndim <= 4: query_bucket_idx = (query_key_dots.shape[-1] - 1) * torch.ones_like(query_key_dots)[:, :, :, -1] key_value_bucket_idx = torch.arange( query_key_dots.shape[-1], dtype=torch.long, device=query_key_dots.device )[None, None, :].expand(query_bucket_idx.shape[:2] + (-1,)) else: query_bucket_idx = key_value_bucket_idx = sorted_bucket_idx_per_hash # get correct mask values 
depending on precision if query_key_dots.dtype == torch.float16: self_mask_value = self.self_mask_value_float16.half() mask_value = self.mask_value_float16.half() else: self_mask_value = self.self_mask_value_float32 mask_value = self.mask_value_float32 if not do_cached_attention: mask = self._compute_attn_mask( query_bucket_idx, key_value_bucket_idx, attention_mask, query_key_dots.shape, do_standard_self_attention, ) if mask is not None: query_key_dots = torch.where(mask, query_key_dots, mask_value) # free memory del mask # Self mask is ALWAYS applied. # From the reformer paper (https://arxiv.org/pdf/2001.04451.pdf): # " While attention to the future is not allowed, typical implementations of the # Transformer do allow a position to attend to itself. # Such behavior is undesirable in a shared-QK formulation because the dot-product # of a query vector with itself will almost always be greater than the dot product of a # query vector with a vector at another position. We therefore modify the masking # to forbid a token from attending to itself, except in situations # where a token has no other valid attention targets (e.g. the first token in a sequence) " self_mask = torch.ne(query_bucket_idx.unsqueeze(-1), key_value_bucket_idx.unsqueeze(-2)).to( query_bucket_idx.device ) # apply self_mask query_key_dots = torch.where(self_mask, query_key_dots, self_mask_value) # free memory del self_mask logits = torch.logsumexp(query_key_dots, dim=-1, keepdim=True) # dots shape is `[batch_size, num_attn_heads, num_hashes * seq_len // chunk_length, chunk_length, chunk_length * (1 + num_chunks_before + num_chunks_after)]` attention_probs = torch.exp(query_key_dots - logits) # free memory del query_key_dots # dropout attention_probs = nn.functional.dropout(attention_probs, p=self.dropout, training=self.training) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask # attend values out_vectors = torch.matmul(attention_probs, value_vectors) # free memory del value_vectors # merge chunk length if out_vectors.ndim > 4: logits = logits.flatten(start_dim=2, end_dim=3).squeeze(-1) out_vectors = out_vectors.flatten(start_dim=2, end_dim=3) return out_vectors, logits, attention_probs def _compute_attn_mask( self, query_indices, key_indices, attention_mask, query_key_dot_shape, do_standard_self_attention ): # attention mask for LSH if attention_mask is not None: # if chunked attention, the attention mask has to correspond to LSH order attention_mask = attention_mask.to(torch.bool)[:, None, :] if not do_standard_self_attention: # expand attn_mask to fit with key_value_bucket_idx shape attention_mask = attention_mask[:, None, :] attention_mask = attention_mask.expand(query_indices.shape[:-1] + (-1,)) # extract attention mask from LSH sorted key_indices attention_mask = torch.gather(attention_mask, -1, key_indices) attention_mask = attention_mask.unsqueeze(-2).expand(query_key_dot_shape) # Causal mask if self.is_decoder is True: causal_mask = torch.ge(query_indices.unsqueeze(-1), key_indices.unsqueeze(-2)).to(query_indices.device) # add attention mask if not None if attention_mask is not None: attention_mask = causal_mask * attention_mask else: attention_mask = causal_mask return attention_mask def _get_relevant_hid_states_and_buckets( self, query_vectors, attention_mask, num_hashes, hidden_states, past_states, past_buckets ): # concat hidden states hidden_states = torch.cat([past_states, hidden_states], dim=1) # batch_size hidden batch_size = hidden_states.shape[0] 
sequence_length = hidden_states.shape[1] # check if cached buckets include pad bucket max_bucket = self.num_buckets if isinstance(self.num_buckets, int) else reduce(mul, self.num_buckets) # if pad bucket was cached => need to increase num buckets for caching increase_num_buckets = past_buckets.max() > num_hashes * max_bucket - 1 # retrieve query buckets query_buckets = self._hash_vectors( query_vectors, num_hashes, attention_mask, increase_num_buckets=increase_num_buckets ) # concat buckets concat_buckets = torch.cat([past_buckets, query_buckets.unsqueeze(-1)], dim=-1) # hash-based sort bucket_idx = _stable_argsort(concat_buckets, dim=-1) # bucket_idx has shape: BatchSize x NumAttnHeads x NumHashes x SequenceLength assert bucket_idx.shape == ( batch_size, self.num_attention_heads, num_hashes, sequence_length, ), ( f"bucket_idx should have shape {(batch_size, self.num_attention_heads, num_hashes, sequence_length)}, but" f" has shape {bucket_idx.shape}." ) # find indices of new bucket indices relevant_bucket_idx = (bucket_idx == (bucket_idx.shape[-1] - 1)).nonzero() # expand relevant bucket indices to its chunks relevant_bucket_idx_chunk = self._expand_to_indices_in_relevant_chunk(relevant_bucket_idx, sequence_length) relevant_bucket_idx_chunk = bucket_idx[tuple(relevant_bucket_idx_chunk.transpose(0, 1))] # adapt bucket_idx for batch and hidden states for index select offset = torch.arange(relevant_bucket_idx_chunk.shape[-1], device=hidden_states.device, dtype=torch.long) bucket_idx_batch_offset = sequence_length * ( batch_size * torch.div(offset, relevant_bucket_idx_chunk.shape[-1], rounding_mode="floor") ) # add batch offset relevant_bucket_idx_chunk_all_batch = relevant_bucket_idx_chunk + bucket_idx_batch_offset hidden_states = hidden_states.reshape((-1, self.hidden_size)) # select all relevant hidden states relevant_hidden_states = hidden_states.index_select(0, relevant_bucket_idx_chunk_all_batch) # reshape hidden states and bucket_idx to correct output relevant_hidden_states = relevant_hidden_states.reshape( batch_size, self.num_attention_heads, -1, self.hidden_size ) relevant_bucket_idx_chunk = relevant_bucket_idx_chunk.reshape( batch_size, self.num_attention_heads, num_hashes, -1 ) assert ( relevant_hidden_states.shape[2] == (self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length * num_hashes ), ( "There should be" f" {(self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length * num_hashes} `hidden_states`," f" there are {relevant_hidden_states.shape[2]} `hidden_states`." ) assert ( relevant_bucket_idx_chunk.shape[-1] == (self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length ), ( "There should be" f" {(self.num_chunks_before + self.num_chunks_after + 1) * self.chunk_length} `hidden_states`, there are" f" {relevant_bucket_idx_chunk.shape[-1]} `bucket_idx`." 
        )

        return relevant_hidden_states, relevant_bucket_idx_chunk, query_buckets

    def _expand_to_indices_in_relevant_chunk(self, indices, sequence_length):
        # get relevant indices of where chunk starts and its size
        # `indices` is a `torch.LongTensor`, so floor-divide with `torch.div` instead of the deprecated tensor `//`
        start_indices_chunk = (
            torch.div(indices[:, -1], self.chunk_length, rounding_mode="floor") - self.num_chunks_before
        ) * self.chunk_length
        total_chunk_size = self.chunk_length * (1 + self.num_chunks_before + self.num_chunks_after)

        # expand start indices and add correct chunk offset via arange
        expanded_start_indices = start_indices_chunk.unsqueeze(-1).expand(indices.shape[0], total_chunk_size)
        chunk_sequence_indices = expanded_start_indices + torch.arange(
            total_chunk_size, device=indices.device, dtype=torch.long
        ).unsqueeze(0).expand(indices.shape[0], total_chunk_size)

        # make sure that circular logic holds via % seq len
        chunk_sequence_indices = chunk_sequence_indices.flatten() % sequence_length

        # expand indices and set indices correctly
        indices = indices.unsqueeze(1).expand((indices.shape[0], total_chunk_size, -1)).flatten(0, 1).clone()
        indices[:, -1] = chunk_sequence_indices

        return indices

    def _len_and_dim_norm(self, vectors, sqrt_num):
        """
        length and attention head size dim normalization
        """
        vectors = self._len_norm(vectors)
        vectors = vectors / sqrt_num

        return vectors

    def _len_norm(self, x, epsilon=1e-6):
        """
        length normalization
        """
        variance = torch.mean(x**2, -1, keepdim=True)
        norm_x = x * torch.rsqrt(variance + epsilon)
        return norm_x

    def _gather_by_expansion(self, vectors, idxs, num_hashes):
        """
        expand dims of idxs and vectors for all hashes and gather
        """
        expanded_idxs = idxs.unsqueeze(-1).expand(-1, -1, -1, self.attention_head_size)
        vectors = vectors.repeat(1, 1, num_hashes, 1)
        return torch.gather(vectors, 2, expanded_idxs)


class ReverseSort(Function):
    """
    After chunked attention is applied to the sorted clusters, the original ordering has to be restored. Since a
    customized backward function is used for Reformer, the gradients of the output vectors have to be explicitly
    sorted here.
""" @staticmethod def forward(ctx, out_vectors, logits, sorted_bucket_idx, undo_sorted_bucket_idx): # save sorted_bucket_idx for backprop with torch.no_grad(): ctx.sorted_bucket_idx = sorted_bucket_idx # undo sort to have correct order for next layer expanded_undo_sort_indices = undo_sorted_bucket_idx.unsqueeze(-1).expand(out_vectors.shape) out_vectors = torch.gather(out_vectors, 2, expanded_undo_sort_indices) logits = torch.gather(logits, 2, undo_sorted_bucket_idx) return out_vectors, logits @staticmethod def backward(ctx, grad_out_vectors, grad_logits): # get parameters saved in ctx sorted_bucket_idx = ctx.sorted_bucket_idx expanded_sort_indices = sorted_bucket_idx.unsqueeze(-1).expand(grad_out_vectors.shape) # reverse sort of forward grad_out_vectors = torch.gather(grad_out_vectors, 2, expanded_sort_indices) grad_logits = torch.gather(grad_logits, 2, sorted_bucket_idx) # return grad and `None` fillers for last 2 forward args return grad_out_vectors, grad_logits, None, None class LocalSelfAttention(nn.Module, EfficientAttentionMixin): def __init__(self, config): super().__init__() self.num_attention_heads = config.num_attention_heads self.chunk_length = config.local_attn_chunk_length self.num_chunks_before = config.local_num_chunks_before self.num_chunks_after = config.local_num_chunks_after self.is_decoder = config.is_decoder self.pad_token_id = config.pad_token_id self.attention_head_size = config.attention_head_size self.all_head_size = self.num_attention_heads * self.attention_head_size self.hidden_size = config.hidden_size # projection matrices self.query = nn.Linear(self.hidden_size, self.all_head_size, bias=False) self.key = nn.Linear(self.hidden_size, self.all_head_size, bias=False) self.value = nn.Linear(self.hidden_size, self.all_head_size, bias=False) self.dropout = config.local_attention_probs_dropout_prob # save mask value here self.register_buffer("mask_value_float16", torch.tensor(-1e4)) self.register_buffer("mask_value_float32", torch.tensor(-1e9)) def forward( self, hidden_states, attention_mask=None, head_mask=None, past_buckets_states=None, use_cache=False, output_attentions=False, **kwargs, ): sequence_length = hidden_states.shape[1] batch_size = hidden_states.shape[0] # check if cache shall be used and that hidden states are already cached if use_cache and past_buckets_states[1] is not None: assert past_buckets_states[0] is None, ( "LocalSelfAttention should not make use of `buckets`. There seems to be an error when caching" " hidden_states_and_buckets." 
) key_value_hidden_states = self._retrieve_relevant_hidden_states( past_buckets_states[1], self.chunk_length, self.num_chunks_before ) key_value_hidden_states = torch.cat([key_value_hidden_states, hidden_states], dim=1) # only query vector for last token query_vectors = self.query(hidden_states) # compute key and value for relevant chunk key_vectors = self.key(key_value_hidden_states) value_vectors = self.value(key_value_hidden_states) # free memory del key_value_hidden_states else: # project hidden_states to query, key and value query_vectors = self.query(hidden_states) key_vectors = self.key(hidden_states) value_vectors = self.value(hidden_states) # split last dim into `config.num_attention_heads` and `config.attention_head_size` query_vectors = self._split_hidden_size_dim(query_vectors, self.num_attention_heads, self.attention_head_size) key_vectors = self._split_hidden_size_dim(key_vectors, self.num_attention_heads, self.attention_head_size) value_vectors = self._split_hidden_size_dim(value_vectors, self.num_attention_heads, self.attention_head_size) assert ( query_vectors.shape[-1] == self.attention_head_size ), f"last dim of query_key_vectors is {query_vectors.shape[-1]} but should be {self.attention_head_size}." assert ( key_vectors.shape[-1] == self.attention_head_size ), f"last dim of query_key_vectors is {key_vectors.shape[-1]} but should be {self.attention_head_size}." assert ( value_vectors.shape[-1] == self.attention_head_size ), f"last dim of query_key_vectors is {value_vectors.shape[-1]} but should be {self.attention_head_size}." if self.chunk_length is None: assert self.num_chunks_before == 0 and self.num_chunks_after == 0, ( "If `config.chunk_length` is `None`, make sure `config.num_chunks_after` and" " `config.num_chunks_before` are set to 0." 
) # normalize key vectors key_vectors = key_vectors / np.sqrt(self.attention_head_size) # get sequence length indices indices = torch.arange(sequence_length, device=query_vectors.device).repeat( batch_size, self.num_attention_heads, 1 ) # if one should do normal n^2 self-attention do_standard_self_attention = sequence_length <= self.chunk_length # if input should be chunked if not do_standard_self_attention: # chunk vectors # B x Num_Attn_Head x Seq_Len // chunk_len x chunk_len x attn_head_size query_vectors = self._split_seq_length_dim_to( query_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size, ) key_vectors = self._split_seq_length_dim_to( key_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size, ) value_vectors = self._split_seq_length_dim_to( value_vectors, -1, self.chunk_length, self.num_attention_heads, self.attention_head_size, ) # chunk indices query_indices = self._split_seq_length_dim_to(indices, -1, self.chunk_length, self.num_attention_heads) key_indices = self._split_seq_length_dim_to(indices, -1, self.chunk_length, self.num_attention_heads) # append chunks before and after key_vectors = self._look_adjacent(key_vectors, self.num_chunks_before, self.num_chunks_after) value_vectors = self._look_adjacent(value_vectors, self.num_chunks_before, self.num_chunks_after) key_indices = self._look_adjacent(key_indices, self.num_chunks_before, self.num_chunks_after) else: query_indices = key_indices = indices # query-key matmul: QK^T query_key_dots = torch.matmul(query_vectors, key_vectors.transpose(-1, -2)) # free memory del query_vectors, key_vectors mask = self._compute_attn_mask( query_indices, key_indices, attention_mask, query_key_dots.shape, do_standard_self_attention ) if mask is not None: # get mask tensor depending on half precision or not if query_key_dots.dtype == torch.float16: mask_value = self.mask_value_float16.half() else: mask_value = self.mask_value_float32 query_key_dots = torch.where(mask, query_key_dots, mask_value) # free memory del mask # softmax logits = torch.logsumexp(query_key_dots, dim=-1, keepdim=True) attention_probs = torch.exp(query_key_dots - logits) # free memory del logits # dropout attention_probs = nn.functional.dropout(attention_probs, p=self.dropout, training=self.training) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask # attend values out_vectors = torch.matmul(attention_probs, value_vectors) # free memory del value_vectors # merge chunk length if not do_standard_self_attention: out_vectors = out_vectors.flatten(start_dim=2, end_dim=3) assert out_vectors.shape == ( batch_size, self.num_attention_heads, sequence_length, self.attention_head_size, ) out_vectors = self._merge_hidden_size_dims(out_vectors, self.num_attention_heads, self.attention_head_size) if output_attentions is False: attention_probs = () return LocalSelfAttentionOutput(hidden_states=out_vectors, attention_probs=attention_probs) def _compute_attn_mask( self, query_indices, key_indices, attention_mask, query_key_dots_shape, do_standard_self_attention ): # chunk attention mask and look before and after if attention_mask is not None: attention_mask = attention_mask.to(torch.bool)[:, None, :] if not do_standard_self_attention: attention_mask = self._split_seq_length_dim_to(attention_mask, -1, self.chunk_length, 1) attention_mask = self._look_adjacent(attention_mask, self.num_chunks_before, self.num_chunks_after) # create attn_mask attention_mask = 
attention_mask.unsqueeze(-2).expand(query_key_dots_shape) # Causal mask if self.is_decoder is True: causal_mask = torch.ge(query_indices.unsqueeze(-1), key_indices.unsqueeze(-2)).to(query_indices.device) # add attention mask if not None if attention_mask is not None: attention_mask = causal_mask * attention_mask else: attention_mask = causal_mask return attention_mask @staticmethod def _retrieve_relevant_hidden_states(previous_hidden_states, chunk_length, num_chunks_before): start_position = ((previous_hidden_states.shape[1] // chunk_length) - num_chunks_before) * chunk_length return previous_hidden_states[:, start_position:] class ReformerSelfOutput(nn.Module): def __init__(self, config): super().__init__() all_head_size = config.num_attention_heads * config.attention_head_size self.dropout = config.hidden_dropout_prob self.dense = nn.Linear(all_head_size, config.hidden_size, bias=False) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) return hidden_states class ReformerAttention(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.layer_id = layer_id self.attn_layers = config.attn_layers self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) if len(set(self.attn_layers)) == 1 and self.attn_layers[0] == "lsh": self.self_attention = LSHSelfAttention(config) elif len(set(self.attn_layers)) == 1 and self.attn_layers[0] == "local": self.self_attention = LocalSelfAttention(config) elif len(set(self.attn_layers)) == 2 and set(self.attn_layers) == {"lsh", "local"}: # get correct attn layers if self.attn_layers[self.layer_id] == "lsh": self.self_attention = LSHSelfAttention(config) else: self.self_attention = LocalSelfAttention(config) else: raise NotImplementedError( f"Only attn layer types 'lsh' and 'local' exist, but got `config.attn_layers`: {self.attn_layers}. " "Select attn layer types from ['lsh', 'local'] only." 
) self.output = ReformerSelfOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, num_hashes=None, past_buckets_states=None, use_cache=False, orig_sequence_length=None, output_attentions=False, buckets=None, ): hidden_states = self.layer_norm(hidden_states) # make sure cached hidden states is set to None for backward pass if past_buckets_states is not None: past_buckets_states_layer = past_buckets_states[self.layer_id] else: past_buckets_states_layer = None # use cached buckets for backprob if buckets not None for LSHSelfAttention self_attention_outputs = self.self_attention( hidden_states=hidden_states, head_mask=head_mask, attention_mask=attention_mask, num_hashes=num_hashes, past_buckets_states=past_buckets_states_layer, use_cache=use_cache, output_attentions=output_attentions, buckets=buckets, ) # add buckets if necessary if hasattr(self_attention_outputs, "buckets"): buckets = self_attention_outputs.buckets else: buckets = None # cache hidden states for future use if use_cache: if past_buckets_states[self.layer_id][0] is None: # padded input should not be cached past_buckets = ( buckets[:, :, :, :orig_sequence_length] if (buckets is not None and orig_sequence_length > 1) else buckets ) else: past_buckets = torch.cat([past_buckets_states[self.layer_id][0], buckets], dim=-1) if past_buckets_states[self.layer_id][1] is None: # padded input should not be cached past_states = hidden_states[:, :orig_sequence_length] else: past_states = torch.cat([past_buckets_states[self.layer_id][1], hidden_states], dim=1) past_buckets_states[self.layer_id] = (past_buckets, past_states) # compute attention feed forward output attention_output = self.output(self_attention_outputs.hidden_states) return AttentionOutput( hidden_states=attention_output, attention_probs=self_attention_outputs.attention_probs, buckets=buckets, ) class ReformerFeedForwardDense(nn.Module): def __init__(self, config): super().__init__() self.dropout = config.hidden_dropout_prob if isinstance(config.hidden_act, str): self.act_fn = ACT2FN[config.hidden_act] else: self.act_fn = config.hidden_act self.dense = nn.Linear(config.hidden_size, config.feed_forward_size) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = self.act_fn(hidden_states) return hidden_states class ReformerFeedForwardOutput(nn.Module): def __init__(self, config): super().__init__() self.dropout = config.hidden_dropout_prob self.dense = nn.Linear(config.feed_forward_size, config.hidden_size) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) return hidden_states class ChunkReformerFeedForward(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dense = ReformerFeedForwardDense(config) self.output = ReformerFeedForwardOutput(config) def forward(self, attention_output): return apply_chunking_to_forward( self.forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output, ) def forward_chunk(self, hidden_states): hidden_states = self.layer_norm(hidden_states) hidden_states = self.dense(hidden_states) return self.output(hidden_states) class ReformerLayer(nn.Module): def __init__(self, config, layer_id=0): 
super().__init__() self.attention = ReformerAttention(config, layer_id) # dropout requires to have the same # seed for forward and backward pass self.attention_seed = None self.feed_forward_seed = None self.feed_forward = ChunkReformerFeedForward(config) def _init_attention_seed(self): """ This function sets a new seed for the attention layer to make dropout deterministic for both forward calls: 1 normal forward call and 1 forward call in backward to recalculate activations. """ # randomize seeds # use cuda generator if available if hasattr(torch.cuda, "default_generators") and len(torch.cuda.default_generators) > 0: # GPU device_idx = torch.cuda.current_device() self.attention_seed = torch.cuda.default_generators[device_idx].seed() else: # CPU self.attention_seed = int(torch.seed() % sys.maxsize) torch.manual_seed(self.attention_seed) def _init_feed_forward_seed(self): """ This function sets a new seed for the feed forward layer to make dropout deterministic for both forward calls: 1 normal forward call and 1 forward call in backward to recalculate activations. """ # randomize seeds # use cuda generator if available if hasattr(torch.cuda, "default_generators") and len(torch.cuda.default_generators) > 0: # GPU device_idx = torch.cuda.current_device() self.feed_forward_seed = torch.cuda.default_generators[device_idx].seed() else: # CPU self.feed_forward_seed = int(torch.seed() % sys.maxsize) torch.manual_seed(self.feed_forward_seed) def forward( self, prev_attn_output, hidden_states, attention_mask=None, head_mask=None, num_hashes=None, past_buckets_states=None, use_cache=False, orig_sequence_length=None, output_attentions=False, ): with torch.no_grad(): # every forward pass we sample a different seed # for dropout and save for forward fn in backward pass # to have correct dropout if self.training: self._init_attention_seed() attn_outputs = self.attention( hidden_states=hidden_states, head_mask=head_mask, attention_mask=attention_mask, num_hashes=num_hashes, past_buckets_states=past_buckets_states, use_cache=use_cache, orig_sequence_length=orig_sequence_length, output_attentions=output_attentions, ) attn_output = attn_outputs.hidden_states # Implementation of RevNet (see Fig. 6 in https://towardsdatascience.com/illustrating-the-reformer-393575ac6ba0) # Y_1 = X_1 + f(X_2) attn_output = prev_attn_output + attn_output # free memory del prev_attn_output # every forward pass we sample a different seed # for dropout and save seed for forward fn in backward # to have correct dropout if self.training: self._init_feed_forward_seed() # Y_2 = X_2 + g(Y_1) hidden_states = hidden_states + self.feed_forward(attn_output) return ReformerOutput( attn_output=attn_output, hidden_states=hidden_states, attention_probs=attn_outputs.attention_probs, buckets=attn_outputs.buckets, ) def backward_pass( self, next_attn_output, hidden_states, grad_attn_output, grad_hidden_states, attention_mask=None, head_mask=None, buckets=None, ): # Implements the backward pass for reversible ResNets. # A good blog post on how this works can be found here: # Implementation of RevNet (see Fig. 6 in https://towardsdatascience.com/illustrating-the-reformer-393575ac6ba0) # This code is heavily inspired by https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reversible.py assert self.training, ( "If you want to train `ReformerModel` and its variations, make sure to use `model.train()` to put the" " model into training mode." 
) with torch.enable_grad(): next_attn_output.requires_grad = True # set seed to have correct dropout torch.manual_seed(self.feed_forward_seed) # g(Y_1) res_hidden_states = self.feed_forward(next_attn_output) res_hidden_states.backward(grad_hidden_states, retain_graph=True) with torch.no_grad(): # X_2 = Y_2 - g(Y_1) hidden_states = hidden_states - res_hidden_states del res_hidden_states grad_attn_output = grad_attn_output + next_attn_output.grad next_attn_output.grad = None with torch.enable_grad(): hidden_states.requires_grad = True # set seed to have correct dropout torch.manual_seed(self.attention_seed) # f(X_2) # use cached buckets for backprob if buckets not None for LSHSelfAttention output = self.attention( hidden_states=hidden_states, head_mask=head_mask, attention_mask=attention_mask, buckets=buckets, ).hidden_states output.backward(grad_attn_output, retain_graph=True) with torch.no_grad(): # X_1 = Y_1 - f(X_2) attn_output = next_attn_output - output del output, next_attn_output grad_hidden_states = grad_hidden_states + hidden_states.grad hidden_states.grad = None hidden_states = hidden_states.detach() return ReformerBackwardOutput( attn_output=attn_output, hidden_states=hidden_states, grad_attn_output=grad_attn_output, grad_hidden_states=grad_hidden_states, ) class _ReversibleFunction(Function): """ To prevent PyTorch from performing the usual backpropagation, a customized backward function is implemented here. This way it is made sure that no memory expensive activations are saved during the forward pass. This function is heavily inspired by https://github.com/lucidrains/reformer-pytorch/blob/master/reformer_pytorch/reversible.py """ @staticmethod def forward( ctx, hidden_states, layers, attention_mask, head_mask, num_hashes, all_hidden_states, all_attentions, past_buckets_states, use_cache, orig_sequence_length, output_hidden_states, output_attentions, ): all_buckets = () # split duplicated tensor hidden_states, attn_output = torch.chunk(hidden_states, 2, dim=-1) for layer_id, (layer, layer_head_mask) in enumerate(zip(layers, head_mask)): if output_hidden_states is True: all_hidden_states.append(hidden_states) layer_outputs = layer( prev_attn_output=attn_output, hidden_states=hidden_states, attention_mask=attention_mask, head_mask=layer_head_mask, num_hashes=num_hashes, past_buckets_states=past_buckets_states, use_cache=use_cache, orig_sequence_length=orig_sequence_length, output_attentions=output_attentions, ) attn_output = layer_outputs.attn_output hidden_states = layer_outputs.hidden_states all_buckets = all_buckets + (layer_outputs.buckets,) if output_attentions: all_attentions.append(layer_outputs.attention_probs) # Add last layer if output_hidden_states is True: all_hidden_states.append(hidden_states) # attach params to ctx for backward ctx.save_for_backward(attn_output.detach(), hidden_states.detach()) ctx.layers = layers ctx.all_buckets = all_buckets ctx.head_mask = head_mask ctx.attention_mask = attention_mask # Concatenate 2 RevNet outputs return torch.cat([attn_output, hidden_states], dim=-1) @staticmethod def backward(ctx, grad_hidden_states): grad_attn_output, grad_hidden_states = torch.chunk(grad_hidden_states, 2, dim=-1) # retrieve params from ctx for backward attn_output, hidden_states = ctx.saved_tensors # create tuple output = ReformerBackwardOutput( attn_output=attn_output, hidden_states=hidden_states, grad_attn_output=grad_attn_output, grad_hidden_states=grad_hidden_states, ) # free memory del grad_attn_output, grad_hidden_states, attn_output, hidden_states 
layers = ctx.layers all_buckets = ctx.all_buckets head_mask = ctx.head_mask attention_mask = ctx.attention_mask for idx, layer in enumerate(layers[::-1]): # pop last buckets from stack buckets = all_buckets[-1] all_buckets = all_buckets[:-1] # backprop output = layer.backward_pass( next_attn_output=output.attn_output, hidden_states=output.hidden_states, grad_attn_output=output.grad_attn_output, grad_hidden_states=output.grad_hidden_states, head_mask=head_mask[len(layers) - idx - 1], attention_mask=attention_mask, buckets=buckets, ) assert all_buckets == (), "buckets have to be empty after backpropagation" grad_hidden_states = torch.cat([output.grad_attn_output, output.grad_hidden_states], dim=-1) # num of return vars has to match num of forward() args # return gradient for hidden_states arg and None for other args return grad_hidden_states, None, None, None, None, None, None, None, None, None, None, None class ReformerEncoder(nn.Module): def __init__(self, config): super().__init__() self.dropout = config.hidden_dropout_prob self.layers = nn.ModuleList([ReformerLayer(config, i) for i in range(config.num_hidden_layers)]) # Reformer is using Rev Nets, thus last layer outputs are concatenated and # Layer Norm is done over 2 * hidden_size self.layer_norm = nn.LayerNorm(2 * config.hidden_size, eps=config.layer_norm_eps) def forward( self, hidden_states, attention_mask=None, head_mask=None, num_hashes=None, past_buckets_states=None, use_cache=False, orig_sequence_length=None, output_hidden_states=False, output_attentions=False, ): # hidden_states and attention lists to be filled if wished all_hidden_states = [] all_attentions = [] # init cached hidden states if necessary if past_buckets_states is None: past_buckets_states = [((None), (None)) for i in range(len(self.layers))] # concat same tensor for reversible ResNet hidden_states = torch.cat([hidden_states, hidden_states], dim=-1) hidden_states = _ReversibleFunction.apply( hidden_states, self.layers, attention_mask, head_mask, num_hashes, all_hidden_states, all_attentions, past_buckets_states, use_cache, orig_sequence_length, output_hidden_states, output_attentions, ) # Apply layer norm to concatenated hidden states hidden_states = self.layer_norm(hidden_states) # Apply dropout hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training) return ReformerEncoderOutput( hidden_states=hidden_states, all_hidden_states=all_hidden_states, all_attentions=all_attentions, past_buckets_states=past_buckets_states, ) class ReformerOnlyLMHead(nn.Module): def __init__(self, config): super().__init__() # Reformer is using Rev Nets, thus last layer outputs are concatenated and # Layer Norm is done over 2 * hidden_size self.seq_len_dim = 1 self.chunk_size_lm_head = config.chunk_size_lm_head self.decoder = nn.Linear(2 * config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) self.decoder.bias = self.bias def forward(self, hidden_states): return apply_chunking_to_forward(self.forward_chunk, self.chunk_size_lm_head, self.seq_len_dim, hidden_states) def forward_chunk(self, hidden_states): hidden_states = self.decoder(hidden_states) return hidden_states def _tie_weights(self): # To tie those two weights if they get disconnected (on TPU or when the bias is resized) self.bias = self.decoder.bias class ReformerPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = ReformerConfig base_model_prefix = "reformer" @property def dummy_inputs(self): input_ids = torch.tensor(DUMMY_INPUTS) input_mask = torch.tensor(DUMMY_MASK) dummy_inputs = { "input_ids": input_ids, "attention_mask": input_mask, } return dummy_inputs def _init_weights(self, module): """Initialize the weights""" if isinstance(module, AxialPositionEmbeddings): for weight in module.weights: nn.init.normal_(weight, std=self.config.axial_norm_std) elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @dataclass class ReformerModelOutput(ModelOutput): """ Output type of [`ReformerModel`]. Args: last_hidden_state (`torch.FloatTensor` of shape `(batch_size, num_predict, hidden_size)`): Sequence of hidden-states at the last layer of the model. `num_predict` corresponds to `target_mapping.shape[1]`. If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`. past_buckets_states (`List[Tuple(torch.LongTensor, torch.FloatTensor)]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `Tuple(torch.LongTensor, torch.FloatTensor` of length `config.n_layers`, with the first element being the previous *buckets* of shape `(batch_size, num_heads, num_hashes, sequence_length)`) and the second being the previous *hidden_states* of shape `(batch_size, sequence_length, hidden_size)`). Contains precomputed buckets and hidden-states that can be used (see `past_buckets_states` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: torch.FloatTensor past_buckets_states: Optional[List[Tuple[torch.LongTensor, torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class ReformerModelWithLMHeadOutput(ModelOutput): """ Output type of [`ReformerModelWithLMHead`]. Args: loss (`torch.FloatTensor` of shape *(1,)*, *optional*, returned when `labels` is provided) Language modeling loss (for next-token prediction). logits (`torch.FloatTensor` of shape `(batch_size, num_predict, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). `num_predict` corresponds to `target_mapping.shape[1]`. 
If `target_mapping` is `None`, then `num_predict` corresponds to `sequence_length`. past_buckets_states (`List[Tuple(torch.LongTensor, torch.FloatTensor)]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `Tuple(torch.LongTensor, torch.FloatTensor` of length `config.n_layers`, with the first element being the previous *buckets* of shape `(batch_size, num_heads, num_hashes, sequence_length)`) and the second being the previous *hidden_states* of shape `(batch_size, sequence_length, hidden_size)`). Contains precomputed buckets and hidden-states that can be used (see `past_buckets_states` input) to speed up sequential decoding. hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): TTuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None past_buckets_states: Optional[List[Tuple[torch.LongTensor, torch.FloatTensor]]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None REFORMER_START_DOCSTRING = r""" Reformer was proposed in [Reformer: The Efficient Transformer](https://arxiv.org/abs/2001.04451) by Nikita Kitaev, Łukasz Kaiser, Anselm Levskaya. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ReformerConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ REFORMER_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. During training the input_ids sequence_length has to be a multiple of the relevant model's chunk lengths (lsh's, local's or both). During evaluation, the indices are automatically padded to be a multiple of the chunk length. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. 
            [What are attention masks?](../glossary#attention-mask)
        position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
        num_hashes (`int`, *optional*):
            The number of hashing rounds that should be performed during bucketing. Setting this argument overwrites
            the default defined in `config.num_hashes`.

            For more information, see `num_hashes` in [`ReformerConfig`].
        past_buckets_states (`List[Tuple(torch.LongTensor, torch.FloatTensor)]`, *optional*):
            List of `Tuple(torch.LongTensor, torch.FloatTensor` of length `config.n_layers`, with the first element
            being the previous *buckets* of shape `(batch_size, num_heads, num_hashes, sequence_length)`) and the
            second being the previous *hidden_states* of shape `(batch_size, sequence_length, hidden_size)`).

            Contains precomputed hidden-states and buckets (only relevant for LSH Self-Attention). Can be used to
            speed up sequential decoding.
        use_cache (`bool`, *optional*):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare Reformer Model transformer outputting raw hidden-states without any specific head on top.",
    REFORMER_START_DOCSTRING,
)
class ReformerModel(ReformerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        assert (
            self.config.num_hidden_layers > 0
        ), "`config.attn_layers` is empty. Select at least one attn layer from ['lsh', 'local']"

        self.embeddings = ReformerEmbeddings(config)
        self.encoder = ReformerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model.
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=ReformerModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, num_hashes: Optional[int] = None, past_buckets_states: Optional[List[Tuple[torch.Tensor]]] = None, use_cache: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, ReformerModelOutput]: use_cache = use_cache if use_cache is not None else self.config.use_cache output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() # noqa: F841 device = input_ids.device elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] # noqa: F841 device = inputs_embeds.device else: raise ValueError("You have to specify either input_ids or inputs_embeds") assert ( len(input_shape) == 2 ), f"`input_ids` have be of shape `[batch_size, sequence_length]`, but got shape: {input_shape}" if past_buckets_states is not None: assert not self.training, "`past_buckets_states` can only be used for inference, not for training`." # prepare head mask head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers, is_attention_chunked=True) # original sequence length for padding orig_sequence_length = input_shape[-1] # if needs padding least_common_mult_chunk_length = _get_least_common_mult_chunk_len(self.config) min_chunk_length = _get_min_chunk_len(self.config) must_pad_to_match_chunk_length = ( input_shape[-1] % least_common_mult_chunk_length != 0 and input_shape[-1] > min_chunk_length and past_buckets_states is None ) if must_pad_to_match_chunk_length: padding_length = least_common_mult_chunk_length - input_shape[-1] % least_common_mult_chunk_length if self.training is True: raise ValueError( f"If training, sequence length {input_shape[-1]} has to be a multiple of least common multiple " f"chunk_length {least_common_mult_chunk_length}. Please consider padding the input to a length " f"of {input_shape[-1] + padding_length}." 
                )

            # pad input
            input_ids, inputs_embeds, attention_mask, position_ids, input_shape = self._pad_to_mult_of_chunk_length(
                input_ids,
                inputs_embeds=inputs_embeds,
                attention_mask=attention_mask,
                position_ids=position_ids,
                input_shape=input_shape,
                padding_length=padding_length,
                padded_seq_length=least_common_mult_chunk_length,
                device=device,
            )

        # start index for position encoding depends on incremental decoding
        if past_buckets_states is not None:
            start_idx_pos_encodings = past_buckets_states[0][1].shape[1]
        else:
            start_idx_pos_encodings = 0

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            start_idx_pos_encodings=start_idx_pos_encodings,
        )

        encoder_outputs = self.encoder(
            hidden_states=embedding_output,
            head_mask=head_mask,
            attention_mask=attention_mask,
            num_hashes=num_hashes,
            past_buckets_states=past_buckets_states,
            use_cache=use_cache,
            orig_sequence_length=orig_sequence_length,
            output_hidden_states=output_hidden_states,
            output_attentions=output_attentions,
        )
        sequence_output = encoder_outputs.hidden_states

        # if padding was applied
        if must_pad_to_match_chunk_length:
            sequence_output = sequence_output[:, :orig_sequence_length]

        past_buckets_states = encoder_outputs.past_buckets_states if use_cache else None
        hidden_states = encoder_outputs.all_hidden_states if output_hidden_states else None
        attentions = encoder_outputs.all_attentions if output_attentions else None

        if not return_dict:
            return tuple(v for v in [sequence_output, past_buckets_states, hidden_states, attentions] if v is not None)
        return ReformerModelOutput(
            last_hidden_state=sequence_output,
            past_buckets_states=past_buckets_states,
            hidden_states=hidden_states,
            attentions=attentions,
        )

    def _pad_to_mult_of_chunk_length(
        self,
        input_ids,
        inputs_embeds=None,
        attention_mask=None,
        position_ids=None,
        input_shape=None,
        padding_length=None,
        padded_seq_length=None,
        device=None,
    ):
        logger.info(
            f"Input ids are automatically padded from {input_shape[-1]} to {input_shape[-1] + padding_length} to be a "
            f"multiple of `config.chunk_length`: {padded_seq_length}"
        )

        padded_input_ids = torch.full(
            (input_shape[0], padding_length),
            self.config.pad_token_id,
            device=device,
            dtype=torch.long,
        )

        # Extend `attention_mask`
        if attention_mask is not None:
            pad_attention_mask = torch.zeros(input_shape[0], padding_length, device=device, dtype=attention_mask.dtype)

            attention_mask = torch.cat([attention_mask, pad_attention_mask], dim=-1)
        else:
            attention_mask = torch.cat(
                [
                    torch.ones(input_shape, device=device, dtype=torch.bool),
                    torch.zeros((input_shape[0], padding_length), device=device, dtype=torch.bool),
                ],
                dim=-1,
            )

        # Extend `input_ids` with padding to match least common multiple chunk_length
        if input_ids is not None:
            input_ids = torch.cat([input_ids, padded_input_ids], dim=-1)
            input_shape = input_ids.size()

            # Pad position ids if given: continue the position indices of the original (unpadded) sequence so that
            # exactly `padding_length` new position ids are appended per batch entry
            if position_ids is not None:
                padded_position_ids = torch.arange(
                    position_ids.shape[-1], position_ids.shape[-1] + padding_length, dtype=torch.long, device=device
                )
                padded_position_ids = padded_position_ids.unsqueeze(0).expand(input_shape[0], padding_length)
                position_ids = torch.cat([position_ids, padded_position_ids], dim=-1)

        # Extend `inputs_embeds` with padding to match least common multiple chunk_length
        if inputs_embeds is not None:
            padded_inputs_embeds = self.embeddings(padded_input_ids, position_ids)
            inputs_embeds = torch.cat([inputs_embeds, padded_inputs_embeds], dim=-2)
            input_shape = inputs_embeds.size()
        return input_ids, inputs_embeds, attention_mask, position_ids, input_shape
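
# Illustrative usage sketch (assumes the `google/reformer-crime-and-punishment` checkpoint, also used in the
# examples further below): `ReformerModel.forward` pads the input to a multiple of the least common chunk length at
# inference time and strips the padded positions from the output again, so sequences of arbitrary length can be
# passed directly. Because of the reversible residual layers, the last hidden state has twice the configured hidden
# size.
#
#     from transformers import AutoTokenizer, ReformerModel
#
#     tokenizer = AutoTokenizer.from_pretrained("google/reformer-crime-and-punishment")
#     model = ReformerModel.from_pretrained("google/reformer-crime-and-punishment")
#     inputs = tokenizer("A sequence of arbitrary length.", return_tensors="pt")
#     outputs = model(**inputs)  # padded internally to a chunk-length multiple, then truncated back
#     last_hidden_state = outputs.last_hidden_state  # (batch_size, sequence_length, 2 * config.hidden_size)
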
@add_start_docstrings("""Reformer Model with a `language modeling` head on top.""", REFORMER_START_DOCSTRING) class ReformerModelWithLMHead(ReformerPreTrainedModel): _keys_to_ignore_on_load_missing = ["lm_head.decoder.bias"] def __init__(self, config): super().__init__(config) assert config.is_decoder, "If you want to use `ReformerModelWithLMHead` make sure that `is_decoder=True`." assert "local" not in self.config.attn_layers or config.local_num_chunks_after == 0, ( "If causal mask is enabled, make sure that `config.local_num_chunks_after` is set to 0 and not" f" {config.local_num_chunks_after}." ) assert "lsh" not in self.config.attn_layers or config.lsh_num_chunks_after == 0, ( "If causal mask is enabled, make sure that `config.lsh_num_chunks_after` is set to 1 and not" f" {config.lsh_num_chunks_after}." ) self.reformer = ReformerModel(config) self.lm_head = ReformerOnlyLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, num_hashes: Optional[int] = None, past_buckets_states: Optional[List[Tuple[torch.Tensor]]] = None, use_cache: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, CausalLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ..., config.vocab_size - 1]`. 
All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size]` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict reformer_outputs = self.reformer( input_ids, position_ids=position_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, num_hashes=num_hashes, past_buckets_states=past_buckets_states, use_cache=use_cache, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) sequence_output = reformer_outputs[0] logits = self.lm_head(sequence_output) loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() loss = loss_fct(shift_logits.view(-1, self.config.vocab_size), shift_labels.view(-1)) if not return_dict: output = (logits,) + reformer_outputs[1:] return ((loss,) + output) if loss is not None else output return ReformerModelWithLMHeadOutput( loss=loss, logits=logits, past_buckets_states=reformer_outputs.past_buckets_states, hidden_states=reformer_outputs.hidden_states, attentions=reformer_outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, use_cache=None, num_hashes=None, **kwargs ): # only last token for inputs_ids if past is defined in kwargs if past_key_values is not None: input_ids = input_ids[:, -1:] inputs_dict = { "input_ids": input_ids, "past_buckets_states": past_key_values, "use_cache": use_cache, "num_hashes": num_hashes, } return inputs_dict def _reorder_cache(self, past_key_values, beam_idx): reord_past_buckets_states = [] for layer_past in past_key_values: # buckets if layer_past[0] is not None: reord_buckets = layer_past[0].index_select(0, beam_idx) else: reord_buckets = None # hidden states reord_hidden_states = layer_past[1].index_select(0, beam_idx) reord_past_buckets_states.append((reord_buckets, reord_hidden_states)) return reord_past_buckets_states @add_start_docstrings("""Reformer Model with a `language modeling` head on top.""", REFORMER_START_DOCSTRING) class ReformerForMaskedLM(ReformerPreTrainedModel): def __init__(self, config): super().__init__(config) assert not config.is_decoder, ( "If you want to use `ReformerForMaskedLM` make sure `config.is_decoder=False` for bi-directional" " self-attention." ) self.reformer = ReformerModel(config) self.lm_head = ReformerOnlyLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, num_hashes: Optional[int] = None, labels: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. 
Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels Returns: <Tip warning={true}> This example uses a false checkpoint since we don't have any available pretrained model for the masked language modeling task with the Reformer architecture. </Tip> Example: ```python >>> import torch >>> from transformers import AutoTokenizer, ReformerForMaskedLM >>> tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-reformer") >>> model = ReformerForMaskedLM.from_pretrained("hf-internal-testing/tiny-random-reformer") >>> # add mask_token >>> tokenizer.add_special_tokens({"mask_token": "[MASK]"}) # doctest: +IGNORE_RESULT >>> inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt") >>> # resize model's embedding matrix >>> model.resize_token_embeddings(new_num_tokens=model.config.vocab_size + 1) # doctest: +IGNORE_RESULT >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> # retrieve index of [MASK] >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0] >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1) >>> predicted_token = tokenizer.decode(predicted_token_id) ``` ```python >>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] >>> # mask labels of non-[MASK] tokens >>> labels = torch.where( ... inputs.input_ids == tokenizer.mask_token_id, labels[:, : inputs["input_ids"].shape[-1]], -100 ... ) >>> outputs = model(**inputs, labels=labels) >>> loss = round(outputs.loss.item(), 2) ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict reformer_outputs = self.reformer( input_ids, position_ids=position_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, num_hashes=num_hashes, use_cache=False, # no causal mask output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) sequence_output = reformer_outputs[0] logits = self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(logits.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (logits,) + reformer_outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=logits, hidden_states=reformer_outputs.hidden_states, attentions=reformer_outputs.attentions, ) @add_start_docstrings( """ Reformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", REFORMER_START_DOCSTRING, ) class ReformerForSequenceClassification(ReformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.config = config self.reformer = ReformerModel(config) self.classifier = ReformerClassificationHead(config) if config.is_decoder is True: logger.warning("You might want to disable causal masking for sequence classification") # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, num_hashes: Optional[int] = None, labels: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Returns: Example of single-label classification: ```python >>> import torch >>> from transformers import AutoTokenizer, ReformerForSequenceClassification >>> tokenizer = AutoTokenizer.from_pretrained("google/reformer-crime-and-punishment") >>> model = ReformerForSequenceClassification.from_pretrained("google/reformer-crime-and-punishment") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_class_id = logits.argmax().item() >>> label = model.config.id2label[predicted_class_id] ``` ```python >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)` >>> num_labels = len(model.config.id2label) >>> model = ReformerForSequenceClassification.from_pretrained( ... "google/reformer-crime-and-punishment", num_labels=num_labels ... 
) >>> labels = torch.tensor(1) >>> loss = model(**inputs, labels=labels).loss ``` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.reformer( input_ids, position_ids=position_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, num_hashes=num_hashes, output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class ReformerClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(2 * config.hidden_size, config.hidden_size) classifier_dropout = ( config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob ) self.dropout = nn.Dropout(classifier_dropout) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, hidden_states, **kwargs): hidden_states = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS]) hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.out_proj(hidden_states) return hidden_states @add_start_docstrings( """ Reformer Model with a span classification head on top for extractive question-answering tasks like SQuAD / TriviaQA ( a linear layer on top of hidden-states output to compute `span start logits` and `span end logits`. 
""", REFORMER_START_DOCSTRING, ) class ReformerForQuestionAnswering(ReformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.reformer = ReformerModel(config) # 2 * config.hidden_size because we use reversible residual layers self.qa_outputs = nn.Linear(2 * config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(REFORMER_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, num_hashes: Optional[int] = None, start_positions: Optional[torch.Tensor] = None, end_positions: Optional[torch.Tensor] = None, output_hidden_states: Optional[bool] = None, output_attentions: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict reformer_outputs = self.reformer( input_ids, position_ids=position_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, num_hashes=num_hashes, use_cache=False, # no causal mask output_hidden_states=output_hidden_states, output_attentions=output_attentions, return_dict=return_dict, ) sequence_output = reformer_outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1).contiguous() end_logits = end_logits.squeeze(-1).contiguous() total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions = start_positions.clamp(0, ignored_index) end_positions = end_positions.clamp(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + reformer_outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=reformer_outputs.hidden_states, attentions=reformer_outputs.attentions, )
1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/tapas/modeling_tapas.py
# coding=utf-8 # Copyright 2020 Google Research and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch TAPAS model.""" import enum import math import os from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, SequenceClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_tapas import TapasConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "TapasConfig" _CHECKPOINT_FOR_DOC = "google/tapas-base" TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST = [ # large models "google/tapas-large", "google/tapas-large-finetuned-sqa", "google/tapas-large-finetuned-wtq", "google/tapas-large-finetuned-wikisql-supervised", "google/tapas-large-finetuned-tabfact", # base models "google/tapas-base", "google/tapas-base-finetuned-sqa", "google/tapas-base-finetuned-wtq", "google/tapas-base-finetuned-wikisql-supervised", "google/tapas-base-finetuned-tabfact", # small models "google/tapas-small", "google/tapas-small-finetuned-sqa", "google/tapas-small-finetuned-wtq", "google/tapas-small-finetuned-wikisql-supervised", "google/tapas-small-finetuned-tabfact", # mini models "google/tapas-mini", "google/tapas-mini-finetuned-sqa", "google/tapas-mini-finetuned-wtq", "google/tapas-mini-finetuned-wikisql-supervised", "google/tapas-mini-finetuned-tabfact", # tiny models "google/tapas-tiny", "google/tapas-tiny-finetuned-sqa", "google/tapas-tiny-finetuned-wtq", "google/tapas-tiny-finetuned-wikisql-supervised", "google/tapas-tiny-finetuned-tabfact", # See all TAPAS models at https://huggingface.co/models?filter=tapas ] EPSILON_ZERO_DIVISION = 1e-10 CLOSE_ENOUGH_TO_LOG_ZERO = -10000.0 @dataclass class TableQuestionAnsweringOutput(ModelOutput): """ Output type of [`TapasForQuestionAnswering`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` (and possibly `answer`, `aggregation_labels`, `numeric_values` and `numeric_values_scale` are provided)): Total loss as the sum of the hierarchical cell selection log-likelihood loss and (optionally) the semi-supervised regression loss and (optionally) supervised loss for aggregations. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Prediction scores of the cell selection head, for every token. logits_aggregation (`torch.FloatTensor`, *optional*, of shape `(batch_size, num_aggregation_labels)`): Prediction scores of the aggregation head, for every aggregation operator. 
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None logits_aggregation: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None def load_tf_weights_in_tapas(model, config, tf_checkpoint_path): """ Load tf checkpoints in a PyTorch model. This is an adaptation from load_tf_weights_in_bert - add cell selection and aggregation heads - take into account additional token type embedding layers """ try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." ) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v # which are not required for using pretrained model if any( n in [ "adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step", "seq_relationship", ] for n in name ): logger.info(f"Skipping {'/'.join(name)}") continue # in case the model is TapasForSequenceClassification, we skip output_bias and output_weights # since these are not used for classification if isinstance(model, TapasForSequenceClassification): if any(n in ["output_bias", "output_weights"] for n in name): logger.info(f"Skipping {'/'.join(name)}") continue # in case the model is TapasModel, we skip output_bias, output_weights, output_bias_cls and output_weights_cls # since this model does not have MLM and NSP heads if isinstance(model, TapasModel): if any(n in ["output_bias", "output_weights", "output_bias_cls", "output_weights_cls"] for n in name): logger.info(f"Skipping {'/'.join(name)}") continue # in case the model is TapasForMaskedLM, we skip the pooler if isinstance(model, TapasForMaskedLM): if any(n in ["pooler"] for n in name): logger.info(f"Skipping {'/'.join(name)}") continue # if first scope name starts with "bert", change it to "tapas" if name[0] == "bert": name[0] = "tapas" pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == "gamma": pointer = getattr(pointer, "weight") elif scope_names[0] == "beta": 
pointer = getattr(pointer, "bias") # cell selection heads elif scope_names[0] == "output_bias": if not isinstance(model, TapasForMaskedLM): pointer = getattr(pointer, "output_bias") else: pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights": pointer = getattr(pointer, "output_weights") elif scope_names[0] == "column_output_bias": pointer = getattr(pointer, "column_output_bias") elif scope_names[0] == "column_output_weights": pointer = getattr(pointer, "column_output_weights") # aggregation head elif scope_names[0] == "output_bias_agg": pointer = getattr(pointer, "aggregation_classifier") pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights_agg": pointer = getattr(pointer, "aggregation_classifier") pointer = getattr(pointer, "weight") # classification head elif scope_names[0] == "output_bias_cls": pointer = getattr(pointer, "classifier") pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights_cls": pointer = getattr(pointer, "classifier") pointer = getattr(pointer, "weight") else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info(f"Skipping {'/'.join(name)}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == "_embeddings": pointer = getattr(pointer, "weight") elif m_name[-13:] in [f"_embeddings_{i}" for i in range(7)]: pointer = getattr(pointer, "weight") elif m_name == "kernel": array = np.transpose(array) try: if pointer.shape != array.shape: raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched") except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info(f"Initialize PyTorch weight {name}") # Added a check to see whether the array is a scalar (because bias terms in Tapas checkpoints can be # scalar => should first be converted to numpy arrays) if np.isscalar(array): array = np.array(array) pointer.data = torch.from_numpy(array) return model class TapasEmbeddings(nn.Module): """ Construct the embeddings from word, position and token_type embeddings. Same as BertEmbeddings but with a number of additional token type embeddings to encode tabular structure. 
""" def __init__(self, config): super().__init__() # we do not include config.disabled_features and config.disable_position_embeddings from the original implementation # word embeddings self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) # position embeddings self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) # token type embeddings for i, type_vocab_sizes in enumerate(config.type_vocab_sizes): name = f"token_type_embeddings_{i}" setattr(self, name, nn.Embedding(type_vocab_sizes, config.hidden_size)) self.number_of_token_type_embeddings = len(config.type_vocab_sizes) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.config = config def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] device = input_ids.device if input_ids is not None else inputs_embeds.device if position_ids is None: # create absolute position embeddings position_ids = torch.arange(seq_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0).expand(input_shape) # when self.config.reset_position_index_per_cell is set to True, create relative position embeddings if self.config.reset_position_index_per_cell: # shape (batch_size, seq_len) col_index = IndexMap(token_type_ids[:, :, 1], self.config.type_vocab_sizes[1], batch_dims=1) # shape (batch_size, seq_len) row_index = IndexMap(token_type_ids[:, :, 2], self.config.type_vocab_sizes[2], batch_dims=1) # shape (batch_size, seq_len) full_index = ProductIndexMap(col_index, row_index) # shape (max_rows * max_columns,). First absolute position for every cell first_position_per_segment = reduce_min(position_ids, full_index)[0] # ? shape (batch_size, seq_len). 
First absolute position of the cell for every token first_position = gather(first_position_per_segment, full_index) # shape (1, seq_len) position = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0) position_ids = torch.min( torch.as_tensor(self.config.max_position_embeddings - 1, device=device), position - first_position ) if token_type_ids is None: token_type_ids = torch.zeros( (input_shape + self.number_of_token_type_embeddings), dtype=torch.long, device=device ) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) embeddings = inputs_embeds + position_embeddings for i in range(self.number_of_token_type_embeddings): name = f"token_type_embeddings_{i}" embeddings += getattr(self, name)(token_type_ids[:, :, i]) embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class TapasSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size} is not a multiple of the number of attention " f"heads {config.num_attention_heads}" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TapasModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput class TapasSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class TapasAttention(nn.Module): def __init__(self, config): super().__init__() self.self = TapasSelfAttention(config) self.output = TapasSelfOutput(config) self.pruned_heads = set() # Copied from transformers.models.bert.modeling_bert.BertAttention.prune_heads def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) # Copied from transformers.models.bert.modeling_bert.BertAttention.forward def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from 
transformers.models.bert.modeling_bert.BertIntermediate class TapasIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput class TapasOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class TapasLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = TapasAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = TapasAttention(config) self.intermediate = TapasIntermediate(config) self.output = TapasOutput(config) # Copied from transformers.models.bert.modeling_bert.BertLayer.forward def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + 
cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs # Copied from transformers.models.bert.modeling_bert.BertLayer.feed_forward_chunk def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class TapasEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([TapasLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_values, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_values, output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) # Copied from transformers.models.bert.modeling_bert.BertPooler class TapasPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->Tapas class TapasPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Tapas class TapasLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = TapasPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Tapas class TapasOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = TapasLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores class TapasPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = TapasConfig base_model_prefix = "tapas" supports_gradient_checkpointing = True # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, TapasEncoder): module.gradient_checkpointing = value TAPAS_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`TapasConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ TAPAS_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0}, 7)`, *optional*): Token indices that encode tabular structure. Indices can be obtained using [`AutoTokenizer`]. See this class for more info. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. If `reset_position_index_per_cell` of [`TapasConfig`] is set to `True`, relative position embeddings will be used. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare Tapas Model transformer outputting raw hidden-states without any specific head on top.", TAPAS_START_DOCSTRING, ) class TapasModel(TapasPreTrainedModel): """ This class is a small change compared to [`BertModel`], taking into account the additional token type ids. The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in [Attention is all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. 
""" def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = TapasEmbeddings(config) self.encoder = TapasEncoder(config) self.pooler = TapasPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: Examples: ```python >>> from transformers import AutoTokenizer, TapasModel >>> import pandas as pd >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base") >>> model = TapasModel.from_pretrained("google/tapas-base") >>> data = { ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... "Age": ["56", "45", "59"], ... "Number of movies": ["87", "53", "69"], ... } >>> table = pd.DataFrame.from_dict(data) >>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"] >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros( (*input_shape, len(self.config.type_vocab_sizes)), dtype=torch.long, device=device ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastabe to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings("""Tapas Model with a `language modeling` head on top.""", TAPAS_START_DOCSTRING) class TapasForMaskedLM(TapasPreTrainedModel): _keys_to_ignore_on_load_missing = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"] config_class = TapasConfig base_model_prefix = "tapas" def __init__(self, config): super().__init__(config) self.tapas = TapasModel(config, add_pooling_layer=False) self.cls = TapasOnlyMLMHead(config) # Initialize weights and apply final processing self.post_init() def get_output_embeddings(self): return self.cls.predictions.decoder def set_output_embeddings(self, new_embeddings): self.cls.predictions.decoder = new_embeddings @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, **kwargs, ) -> Union[Tuple, MaskedLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the 
masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from transformers import AutoTokenizer, TapasForMaskedLM >>> import pandas as pd >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base") >>> model = TapasForMaskedLM.from_pretrained("google/tapas-base") >>> data = { ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... "Age": ["56", "45", "59"], ... "Number of movies": ["87", "53", "69"], ... } >>> table = pd.DataFrame.from_dict(data) >>> inputs = tokenizer( ... table=table, queries="How many [MASK] has George [MASK] played in?", return_tensors="pt" ... ) >>> labels = tokenizer( ... table=table, queries="How many movies has George Clooney played in?", return_tensors="pt" ... )["input_ids"] >>> outputs = model(**inputs, labels=labels) >>> logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.tapas( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Tapas Model with a cell selection head and optional aggregation head on top for question-answering tasks on tables (linear layers on top of the hidden-states output to compute `logits` and optional `logits_aggregation`), e.g. for SQA, WTQ or WikiSQL-supervised tasks. """, TAPAS_START_DOCSTRING, ) class TapasForQuestionAnswering(TapasPreTrainedModel): def __init__(self, config: TapasConfig): super().__init__(config) # base model self.tapas = TapasModel(config) # dropout (only used when training) self.dropout = nn.Dropout(config.hidden_dropout_prob) # cell selection heads if config.init_cell_selection_weights_to_zero: # init_cell_selection_weights_to_zero: Whether the initial weights should be # set to 0. This ensures that all tokens have the same prior probability. 
self.output_weights = nn.Parameter(torch.zeros(config.hidden_size)) self.column_output_weights = nn.Parameter(torch.zeros(config.hidden_size)) else: self.output_weights = nn.Parameter(torch.empty(config.hidden_size)) nn.init.normal_( self.output_weights, std=config.initializer_range ) # here, a truncated normal is used in the original implementation self.column_output_weights = nn.Parameter(torch.empty(config.hidden_size)) nn.init.normal_( self.column_output_weights, std=config.initializer_range ) # here, a truncated normal is used in the original implementation self.output_bias = nn.Parameter(torch.zeros([])) self.column_output_bias = nn.Parameter(torch.zeros([])) # aggregation head if config.num_aggregation_labels > 0: self.aggregation_classifier = nn.Linear(config.hidden_size, config.num_aggregation_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TableQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, table_mask: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None, aggregation_labels: Optional[torch.LongTensor] = None, float_answer: Optional[torch.FloatTensor] = None, numeric_values: Optional[torch.FloatTensor] = None, numeric_values_scale: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TableQuestionAnsweringOutput]: r""" table_mask (`torch.LongTensor` of shape `(batch_size, seq_length)`, *optional*): Mask for the table. Indicates which tokens belong to the table (1). Question tokens, table headers and padding are 0. labels (`torch.LongTensor` of shape `(batch_size, seq_length)`, *optional*): Labels per token for computing the hierarchical cell selection loss. This encodes the positions of the answer appearing in the table. Can be obtained using [`AutoTokenizer`]. - 1 for tokens that are **part of the answer**, - 0 for tokens that are **not part of the answer**. aggregation_labels (`torch.LongTensor` of shape `(batch_size, )`, *optional*): Aggregation function index for every example in the batch for computing the aggregation loss. Indices should be in `[0, ..., config.num_aggregation_labels - 1]`. Only required in case of strong supervision for aggregation (WikiSQL-supervised). float_answer (`torch.FloatTensor` of shape `(batch_size, )`, *optional*): Float answer for every example in the batch. Set to *float('nan')* for cell selection questions. Only required in case of weak supervision (WTQ) to calculate the aggregate mask and regression loss. numeric_values (`torch.FloatTensor` of shape `(batch_size, seq_length)`, *optional*): Numeric values of every token, NaN for tokens which are not numeric values. Can be obtained using [`AutoTokenizer`]. Only required in case of weak supervision for aggregation (WTQ) to calculate the regression loss. numeric_values_scale (`torch.FloatTensor` of shape `(batch_size, seq_length)`, *optional*): Scale of the numeric values of every token. Can be obtained using [`AutoTokenizer`]. 
Only required in case of weak supervision for aggregation (WTQ) to calculate the regression loss. Returns: Examples: ```python >>> from transformers import AutoTokenizer, TapasForQuestionAnswering >>> import pandas as pd >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base-finetuned-wtq") >>> model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq") >>> data = { ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... "Age": ["56", "45", "59"], ... "Number of movies": ["87", "53", "69"], ... } >>> table = pd.DataFrame.from_dict(data) >>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"] >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> logits_aggregation = outputs.logits_aggregation ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.tapas( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] pooled_output = outputs[1] sequence_output = self.dropout(sequence_output) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] device = input_ids.device if input_ids is not None else inputs_embeds.device # Construct indices for the table. if token_type_ids is None: token_type_ids = torch.zeros( (*input_shape, len(self.config.type_vocab_sizes)), dtype=torch.long, device=device ) token_types = [ "segment_ids", "column_ids", "row_ids", "prev_labels", "column_ranks", "inv_column_ranks", "numeric_relations", ] row_ids = token_type_ids[:, :, token_types.index("row_ids")] column_ids = token_type_ids[:, :, token_types.index("column_ids")] row_index = IndexMap( indices=torch.min(row_ids, torch.as_tensor(self.config.max_num_rows - 1, device=row_ids.device)), num_segments=self.config.max_num_rows, batch_dims=1, ) col_index = IndexMap( indices=torch.min(column_ids, torch.as_tensor(self.config.max_num_columns - 1, device=column_ids.device)), num_segments=self.config.max_num_columns, batch_dims=1, ) cell_index = ProductIndexMap(row_index, col_index) # Masks. input_shape = input_ids.size() if input_ids is not None else inputs_embeds.size()[:-1] device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) # Table cells only, without question tokens and table headers. if table_mask is None: table_mask = torch.where(row_ids > 0, torch.ones_like(row_ids), torch.zeros_like(row_ids)) # torch.FloatTensor[batch_size, seq_length] input_mask_float = attention_mask.float().to(device) table_mask_float = table_mask.float().to(device) # Mask for cells that exist in the table (i.e. that are not padding). cell_mask, _ = reduce_mean(input_mask_float, cell_index) # Compute logits per token. These are used to select individual cells. logits = compute_token_logits(sequence_output, self.config.temperature, self.output_weights, self.output_bias) # Compute logits per column. These are used to select a column. 
column_logits = None if self.config.select_one_column: column_logits = compute_column_logits( sequence_output, self.column_output_weights, self.column_output_bias, cell_index, cell_mask, self.config.allow_empty_column_selection, ) # Aggregation logits logits_aggregation = None if self.config.num_aggregation_labels > 0: logits_aggregation = self.aggregation_classifier(pooled_output) # Total loss calculation total_loss = 0.0 calculate_loss = False if labels is not None: calculate_loss = True is_supervised = not self.config.num_aggregation_labels > 0 or not self.config.use_answer_as_supervision # Semi-supervised cell selection in case of no aggregation: # If the answer (the denotation) appears directly in the table we might # select the answer without applying any aggregation function. There are # some ambiguous cases, see utils._calculate_aggregate_mask for more info. # `aggregate_mask` is 1 for examples where we chose to aggregate and 0 # for examples where we chose to select the answer directly. # `labels` encodes the positions of the answer appearing in the table. if is_supervised: aggregate_mask = None else: if float_answer is not None: assert ( labels.shape[0] == float_answer.shape[0] ), "Make sure the answers are a FloatTensor of shape (batch_size,)" # <float32>[batch_size] aggregate_mask = _calculate_aggregate_mask( float_answer, pooled_output, self.config.cell_selection_preference, labels, self.aggregation_classifier, ) else: raise ValueError("You have to specify float answers in order to calculate the aggregate mask") # Cell selection log-likelihood if self.config.average_logits_per_cell: logits_per_cell, _ = reduce_mean(logits, cell_index) logits = gather(logits_per_cell, cell_index) dist_per_token = torch.distributions.Bernoulli(logits=logits) # Compute cell selection loss per example. selection_loss_per_example = None if not self.config.select_one_column: weight = torch.where( labels == 0, torch.ones_like(labels, dtype=torch.float32), self.config.positive_label_weight * torch.ones_like(labels, dtype=torch.float32), ) selection_loss_per_token = -dist_per_token.log_prob(labels) * weight selection_loss_per_example = torch.sum(selection_loss_per_token * input_mask_float, dim=1) / ( torch.sum(input_mask_float, dim=1) + EPSILON_ZERO_DIVISION ) else: selection_loss_per_example, logits = _single_column_cell_selection_loss( logits, column_logits, labels, cell_index, col_index, cell_mask ) dist_per_token = torch.distributions.Bernoulli(logits=logits) # Supervised cell selection if self.config.disable_per_token_loss: pass elif is_supervised: total_loss += torch.mean(selection_loss_per_example) else: # For the not supervised case, do not assign loss for cell selection total_loss += torch.mean(selection_loss_per_example * (1.0 - aggregate_mask)) # Semi-supervised regression loss and supervised loss for aggregations if self.config.num_aggregation_labels > 0: if is_supervised: # Note that `aggregate_mask` is None if the setting is supervised. 
if aggregation_labels is not None: assert ( labels.shape[0] == aggregation_labels.shape[0] ), "Make sure the aggregation labels are a LongTensor of shape (batch_size,)" per_example_additional_loss = _calculate_aggregation_loss( logits_aggregation, aggregate_mask, aggregation_labels, self.config.use_answer_as_supervision, self.config.num_aggregation_labels, self.config.aggregation_loss_weight, ) else: raise ValueError( "You have to specify aggregation labels in order to calculate the aggregation loss" ) else: # Set aggregation labels to zeros aggregation_labels = torch.zeros(labels.shape[0], dtype=torch.long, device=labels.device) per_example_additional_loss = _calculate_aggregation_loss( logits_aggregation, aggregate_mask, aggregation_labels, self.config.use_answer_as_supervision, self.config.num_aggregation_labels, self.config.aggregation_loss_weight, ) if self.config.use_answer_as_supervision: if numeric_values is not None and numeric_values_scale is not None: assert numeric_values.shape == numeric_values_scale.shape # Add regression loss for numeric answers which require aggregation. answer_loss, large_answer_loss_mask = _calculate_regression_loss( float_answer, aggregate_mask, dist_per_token, numeric_values, numeric_values_scale, table_mask_float, logits_aggregation, self.config, ) per_example_additional_loss += answer_loss # Zero loss for examples with answer_loss > cutoff. per_example_additional_loss *= large_answer_loss_mask else: raise ValueError( "You have to specify numeric values and numeric values scale in order to calculate the" " regression loss" ) total_loss += torch.mean(per_example_additional_loss) else: # if no label ids are provided, set them to zeros in order to properly compute logits labels = torch.zeros_like(logits) _, logits = _single_column_cell_selection_loss( logits, column_logits, labels, cell_index, col_index, cell_mask ) if not return_dict: output = (logits, logits_aggregation) + outputs[2:] return ((total_loss,) + output) if calculate_loss else output return TableQuestionAnsweringOutput( loss=total_loss if calculate_loss else None, logits=logits, logits_aggregation=logits_aggregation, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Tapas Model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for table entailment tasks, such as TabFact (Chen et al., 2020). 
""", TAPAS_START_DOCSTRING, ) class TapasForSequenceClassification(TapasPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.tapas = TapasModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Note: this is called "classification_class_index" in the original implementation. Returns: Examples: ```python >>> from transformers import AutoTokenizer, TapasForSequenceClassification >>> import torch >>> import pandas as pd >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base-finetuned-tabfact") >>> model = TapasForSequenceClassification.from_pretrained("google/tapas-base-finetuned-tabfact") >>> data = { ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... "Age": ["56", "45", "59"], ... "Number of movies": ["87", "53", "69"], ... } >>> table = pd.DataFrame.from_dict(data) >>> queries = [ ... "There is only one actor who is 45 years old", ... "There are 3 actors which played in more than 60 movies", ... 
] >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt") >>> labels = torch.tensor([1, 0]) # 1 means entailed, 0 means refuted >>> outputs = model(**inputs, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.tapas( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) """ TAPAS utilities.""" class AverageApproximationFunction(str, enum.Enum): RATIO = "ratio" FIRST_ORDER = "first_order" SECOND_ORDER = "second_order" # Beginning of everything related to segmented tensors class IndexMap(object): """Index grouping entries within a tensor.""" def __init__(self, indices, num_segments, batch_dims=0): """ Creates an index Args: indices (`torch.LongTensor`, same shape as a *values* Tensor to which the indices refer): Tensor containing the indices. num_segments (`torch.LongTensor`): Scalar tensor, the number of segments. All elements in a batched segmented tensor must have the same number of segments (although many segments can be empty). batch_dims (`int`, *optional*, defaults to 0): The number of batch dimensions. The first *batch_dims* dimensions of a SegmentedTensor are treated as batch dimensions. Segments in different batch elements are always distinct even if they have the same index. """ self.indices = torch.as_tensor(indices) self.num_segments = torch.as_tensor(num_segments, device=indices.device) self.batch_dims = batch_dims def batch_shape(self): return self.indices.size()[: self.batch_dims] # returns a torch.Size object class ProductIndexMap(IndexMap): """The product of two indices.""" def __init__(self, outer_index, inner_index): """ Combines indices i and j into pairs (i, j). The result is an index where each segment (i, j) is the intersection of segments i and j. For example if the inputs represent table cells indexed by respectively rows and columns the output will be a table indexed by (row, column) pairs, i.e. by cell. The implementation combines indices {0, .., n - 1} and {0, .., m - 1} into {0, .., nm - 1}. 
The output has *num_segments* equal to *outer_index.num_segments* * *inner_index.num_segments* Args: outer_index (`IndexMap`): IndexMap. inner_index (`IndexMap`): IndexMap, must have the same shape as *outer_index*. """ if outer_index.batch_dims != inner_index.batch_dims: raise ValueError("outer_index.batch_dims and inner_index.batch_dims must be the same.") super().__init__( indices=(inner_index.indices + outer_index.indices * inner_index.num_segments), num_segments=inner_index.num_segments * outer_index.num_segments, batch_dims=inner_index.batch_dims, ) self.outer_index = outer_index self.inner_index = inner_index def project_outer(self, index): """Projects an index with the same index set onto the outer components.""" return IndexMap( indices=(index.indices // self.inner_index.num_segments).type(torch.float).floor().type(torch.long), num_segments=self.outer_index.num_segments, batch_dims=index.batch_dims, ) def project_inner(self, index): """Projects an index with the same index set onto the inner components.""" return IndexMap( indices=torch.fmod(index.indices, self.inner_index.num_segments) .type(torch.float) .floor() .type(torch.long), num_segments=self.inner_index.num_segments, batch_dims=index.batch_dims, ) def gather(values, index, name="segmented_gather"): """ Gathers from *values* using the index map. For each element in the domain of the index map this operation looks up a value for that index in *values*. Two elements from the same segment always get assigned the same value. Args: values (`torch.Tensor` of shape (B1, ..., Bn, num_segments, V1, ...)): Tensor with segment values. index (`IndexMap` of shape (B1, ..., Bn, I1, ..., Ik)): IndexMap. name (`str`, *optional*, defaults to 'segmented_gather'): Name for the operation. Currently not used Returns: `tuple(torch.Tensor)`: Tensor of shape (B1, ..., Bn, I1, ..., Ik, V1, ...) with the gathered values. """ indices = index.indices # first, check whether the indices of the index represent scalar values (i.e. not vectorized) if len(values.shape[index.batch_dims :]) < 2: return torch.gather( values, index.batch_dims, indices.view( values.size()[0], -1 ), # torch.gather expects index to have the same number of dimensions as values ).view(indices.size()) else: # this means we have a vectorized version # we have to adjust the index indices = indices.unsqueeze(-1).expand(values.shape) return torch.gather(values, index.batch_dims, indices) def flatten(index, name="segmented_flatten"): """ Flattens a batched index map (which is typically of shape batch_size, seq_length) to a 1d index map. This operation relabels the segments to keep batch elements distinct. The k-th batch element will have indices shifted by *num_segments* * (k - 1). The result is a tensor with *num_segments* multiplied by the number of elements in the batch. Args: index (`IndexMap`): IndexMap to flatten. name (`str`, *optional*, defaults to 'segmented_flatten'): Name for the operation. Currently not used Returns: (`IndexMap`): The flattened IndexMap. """ # first, get batch_size as scalar tensor batch_size = torch.prod(torch.tensor(list(index.batch_shape()))) # next, create offset as 1-D tensor of length batch_size, # and multiply element-wise by num segments (to offset different elements in the batch) e.g. 
if batch size is 2: [0, 64] offset = torch.arange(start=0, end=batch_size, device=index.num_segments.device) * index.num_segments offset = offset.view(index.batch_shape()) for _ in range(index.batch_dims, len(index.indices.size())): # typically range(1,2) offset = offset.unsqueeze(-1) indices = offset + index.indices return IndexMap(indices=indices.view(-1), num_segments=index.num_segments * batch_size, batch_dims=0) def range_index_map(batch_shape, num_segments, name="range_index_map"): """ Constructs an index map equal to range(num_segments). Args: batch_shape (`torch.Size`): Batch shape num_segments (`int`): Number of segments name (`str`, *optional*, defaults to 'range_index_map'): Name for the operation. Currently not used Returns: (`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments). """ batch_shape = torch.as_tensor( batch_shape, dtype=torch.long ) # create a rank 1 tensor vector containing batch_shape (e.g. [2]) assert len(batch_shape.size()) == 1 num_segments = torch.as_tensor(num_segments) # create a rank 0 tensor (scalar) containing num_segments (e.g. 64) assert len(num_segments.size()) == 0 indices = torch.arange( start=0, end=num_segments, device=num_segments.device ) # create a rank 1 vector with num_segments elements new_tensor = torch.cat( [torch.ones_like(batch_shape, dtype=torch.long, device=num_segments.device), num_segments.unsqueeze(dim=0)], dim=0, ) # new_tensor is just a vector of [1 64] for example (assuming only 1 batch dimension) new_shape = [int(x) for x in new_tensor.tolist()] indices = indices.view(new_shape) multiples = torch.cat([batch_shape, torch.as_tensor([1])], dim=0) indices = indices.repeat(multiples.tolist()) # equivalent (in Numpy:) # indices = torch.as_tensor(np.tile(indices.numpy(), multiples.tolist())) return IndexMap(indices=indices, num_segments=num_segments, batch_dims=list(batch_shape.size())[0]) def _segment_reduce(values, index, segment_reduce_fn, name): """ Applies a segment reduction segment-wise. Args: values (`torch.Tensor`): Tensor with segment values. index (`IndexMap`): IndexMap. segment_reduce_fn (`str`): Name for the reduce operation. One of "sum", "mean", "max" or "min". name (`str`): Name for the operation. Currently not used Returns: (`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments). """ # Flatten the batch dimensions, as segments ops (scatter) do not support batching. # However if `values` has extra dimensions to the right keep them # unflattened. Segmented ops support vector-valued operations. flat_index = flatten(index) vector_shape = values.size()[len(index.indices.size()) :] # torch.Size object flattened_shape = torch.cat( [torch.as_tensor([-1], dtype=torch.long), torch.as_tensor(vector_shape, dtype=torch.long)], dim=0 ) # changed "view" by "reshape" in the following line flat_values = values.reshape(flattened_shape.tolist()) out = torch.zeros(int(flat_index.num_segments), dtype=torch.float, device=flat_values.device) segment_means = out.scatter_reduce( dim=0, index=flat_index.indices.long(), src=flat_values.float(), reduce=segment_reduce_fn, include_self=False ) # Unflatten the values. 
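# e.g. with batch_shape (2,), num_segments 64 and scalar values (empty vector_shape), new_shape below becomes [2, 64]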
new_shape = torch.cat( [ torch.as_tensor(index.batch_shape(), dtype=torch.long), torch.as_tensor([index.num_segments], dtype=torch.long), torch.as_tensor(vector_shape, dtype=torch.long), ], dim=0, ) output_values = segment_means.clone().view(new_shape.tolist()).to(values.dtype) output_index = range_index_map(index.batch_shape(), index.num_segments) return output_values, output_index def reduce_sum(values, index, name="segmented_reduce_sum"): """ Sums a tensor over its segments. Outputs 0 for empty segments. This operation computes the sum over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be a sum of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]): Tensor containing the values of which the sum must be taken segment-wise. index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].): Index defining the segments. name (`str`, *optional*, defaults to 'segmented_reduce_sum'): Name for the operation. Currently not used Returns: output_values (`torch.Tensor` of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the output values. output_index (`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments]. """ return _segment_reduce(values, index, "sum", name) def reduce_mean(values, index, name="segmented_reduce_mean"): """ Averages a tensor over its segments. Outputs 0 for empty segments. This operation computes the mean over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be a mean of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]): Tensor containing the values of which the mean must be taken segment-wise. index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].): Index defining the segments. name (`str`, *optional*, defaults to 'segmented_reduce_mean'): Name for the operation. Currently not used Returns: output_values (`torch.Tensor` of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the output values. output_index (`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments]. """ return _segment_reduce(values, index, "mean", name) def reduce_max(values, index, name="segmented_reduce_max"): """ Computes the maximum over segments. This operation computes the maximum over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be an element-wise maximum of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]): Tensor containing the values of which the max must be taken segment-wise. index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].): Index defining the segments. name (`str`, *optional*, defaults to 'segmented_reduce_max'): Name for the operation.
Currently not used Returns: output_values (`torch.Tensor` of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the output values. output_index (`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments]. """ return _segment_reduce(values, index, "amax", name) def reduce_min(values, index, name="segmented_reduce_min"): """ Computes the minimum over segments. This operation computes the minimum over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be an element-wise minimum of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]): Tensor containing the values of which the min must be taken segment-wise. index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].): Index defining the segments. name (`str`, *optional*, defaults to 'segmented_reduce_min'): Name for the operation. Currently not used Returns: output_values (`torch.Tensor` of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the output values. output_index (`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments]. """ return _segment_reduce(values, index, "amin", name) # End of everything related to segmented tensors def compute_column_logits( sequence_output, column_output_weights, column_output_bias, cell_index, cell_mask, allow_empty_column_selection ): """ Computes the column logits. Args: sequence_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model. column_output_weights (`torch.FloatTensor` of shape `(hidden_size)`): Weights of the linear layer for column selection. column_output_bias (`torch.FloatTensor` of shape `()`): Bias of the linear layer for column selection. cell_index (`ProductIndexMap`): Index that groups tokens into cells. cell_mask (`torch.FloatTensor` of shape `(batch_size, max_num_rows * max_num_cols)`): Mask for cells that exist in the table (i.e. that are not padding). allow_empty_column_selection (`bool`): Whether to allow the model to select no column at all Returns: column_logits (`torch.FloatTensor` of shape `(batch_size, max_num_cols)`): Tensor containing the column logits for every example in the batch. """ # First, compute the token logits (batch_size, seq_len) - without temperature token_logits = torch.einsum("bsj,j->bs", sequence_output, column_output_weights) + column_output_bias # Next, average the logits per cell (batch_size, max_num_cols*max_num_rows) cell_logits, cell_logits_index = reduce_mean(token_logits, cell_index) # Finally, average the logits per column (batch_size, max_num_cols) column_index = cell_index.project_inner(cell_logits_index) column_logits, out_index = reduce_sum(cell_logits * cell_mask, column_index) cell_count, _ = reduce_sum(cell_mask, column_index) column_logits /= cell_count + EPSILON_ZERO_DIVISION # Mask columns that do not appear in the example.
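# (column id 0 is the special "select no column" option, so it is excluded from this padding mask and handled by allow_empty_column_selection below)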
is_padding = torch.logical_and(cell_count < 0.5, ~torch.eq(out_index.indices, 0)) column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * torch.as_tensor( is_padding, dtype=torch.float32, device=is_padding.device ) if not allow_empty_column_selection: column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * torch.as_tensor( torch.eq(out_index.indices, 0), dtype=torch.float32, device=out_index.indices.device ) return column_logits def _single_column_cell_selection_loss(token_logits, column_logits, labels, cell_index, col_index, cell_mask): """ Computes the loss for cell selection constrained to a single column. The loss is a hierarchical log-likelihood. The model first predicts a column and then selects cells within that column (conditioned on the column). Cells outside the selected column are never selected. Args: token_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Tensor containing the logits per token. column_logits (`torch.FloatTensor` of shape `(batch_size, max_num_cols)`): Tensor containing the logits per column. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Labels per token. cell_index (`ProductIndexMap`): Index that groups tokens into cells. col_index (`IndexMap`): Index that groups tokens into columns. cell_mask (`torch.FloatTensor` of shape `(batch_size, max_num_rows * max_num_cols)`): Mask for cells that exist in the table (i.e. that are not padding). Returns: selection_loss_per_example (`torch.FloatTensor` of shape `(batch_size,)`): Loss for each example. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): New logits which are only allowed to select cells in a single column. Logits outside of the most likely column according to *column_logits* will be set to a very low value (such that the probabilities are 0). """ # Part 1: column loss # First find the column we should select. We use the column with maximum number of selected cells. labels_per_column, _ = reduce_sum(torch.as_tensor(labels, dtype=torch.float32, device=labels.device), col_index) # shape of labels_per_column is (batch_size, max_num_cols). It contains the number of label ids for every column, for every example column_label = torch.argmax(labels_per_column, dim=-1) # shape (batch_size,) # Check if there are no selected cells in the column. In that case the model # should predict the special column id 0, which means "select nothing". no_cell_selected = torch.eq( torch.max(labels_per_column, dim=-1)[0], 0 ) # no_cell_selected is of shape (batch_size,) and equals True # if an example of the batch has no cells selected (i.e. if there are no labels set to 1 for that example) column_label = torch.where( no_cell_selected.view(column_label.size()), torch.zeros_like(column_label), column_label ) column_dist = torch.distributions.Categorical(logits=column_logits) # shape (batch_size, max_num_cols) column_loss_per_example = -column_dist.log_prob(column_label) # Part 2: cell loss # Reduce the labels and logits to per-cell from per-token. # logits_per_cell: shape (batch_size, max_num_rows*max_num_cols) i.e. (batch_size, 64*32) logits_per_cell, _ = reduce_mean(token_logits, cell_index) # labels_per_cell: shape (batch_size, 64*32), indicating whether each cell should be selected (1) or not (0) labels_per_cell, labels_index = reduce_max( torch.as_tensor(labels, dtype=torch.long, device=labels.device), cell_index ) # Mask for the selected column. 
# column_id_for_cells: shape (batch_size, 64*32), indicating to which column each cell belongs column_id_for_cells = cell_index.project_inner(labels_index).indices # column_mask: shape (batch_size, 64*32), equal to 1 if cell belongs to column to be selected column_mask = torch.as_tensor( torch.eq(column_id_for_cells, torch.unsqueeze(column_label, dim=-1)), dtype=torch.float32, device=cell_mask.device, ) # Compute the log-likelihood for cells, but only for the selected column. cell_dist = torch.distributions.Bernoulli(logits=logits_per_cell) # shape (batch_size, 64*32) cell_log_prob = cell_dist.log_prob(labels_per_cell.type(torch.float32)) # shape(batch_size, 64*32) cell_loss = -torch.sum(cell_log_prob * column_mask * cell_mask, dim=1) # We need to normalize the loss by the number of cells in the column. cell_loss /= torch.sum(column_mask * cell_mask, dim=1) + EPSILON_ZERO_DIVISION selection_loss_per_example = column_loss_per_example selection_loss_per_example += torch.where( no_cell_selected.view(selection_loss_per_example.size()), torch.zeros_like(selection_loss_per_example), cell_loss, ) # Set the probs outside the selected column (selected by the *model*) # to 0. This ensures backwards compatibility with models that select # cells from multiple columns. selected_column_id = torch.as_tensor( torch.argmax(column_logits, dim=-1), dtype=torch.long, device=column_logits.device ) # shape (batch_size,) # selected_column_mask: shape (batch_size, 64*32), equal to 1 if cell belongs to column selected by the model selected_column_mask = torch.as_tensor( torch.eq(column_id_for_cells, torch.unsqueeze(selected_column_id, dim=-1)), dtype=torch.float32, device=selected_column_id.device, ) # Never select cells with the special column id 0. selected_column_mask = torch.where( torch.eq(column_id_for_cells, 0).view(selected_column_mask.size()), torch.zeros_like(selected_column_mask), selected_column_mask, ) new_logits_per_cell = logits_per_cell + CLOSE_ENOUGH_TO_LOG_ZERO * (1.0 - cell_mask * selected_column_mask) logits = gather(new_logits_per_cell, cell_index) return selection_loss_per_example, logits def compute_token_logits(sequence_output, temperature, output_weights, output_bias): """ Computes logits per token Args: sequence_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model. temperature (`float`): Temperature for the Bernoulli distribution. output_weights (`torch.FloatTensor` of shape `(hidden_size,)`): Weights of the linear layer for cell selection. output_bias (`torch.FloatTensor` of shape `()`): Bias of the linear layer for cell selection Returns: logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Logits per token. """ logits = (torch.einsum("bsj,j->bs", sequence_output, output_weights) + output_bias) / temperature return logits def _calculate_aggregate_mask(answer, pooled_output, cell_selection_preference, labels, aggregation_classifier): """ Finds examples where the model should select cells with no aggregation. Returns a mask that determines for which examples should the model select answers directly from the table, without any aggregation function. If the answer is a piece of text the case is unambiguous as aggregation functions only apply to numbers. If the answer is a number but does not appear in the table then we must use some aggregation case. The ambiguous case is when the answer is a number that also appears in the table. 
In this case we use the aggregation function probabilities predicted by the model to decide whether to select or aggregate. The threshold for this is a hyperparameter *cell_selection_preference* Args: answer (`torch.FloatTensor` of shape `(batch_size, )`): Answer for every example in the batch. Nan if there is no scalar answer. pooled_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Output of the pooler (BertPooler) on top of the encoder layer. cell_selection_preference (`float`): Preference for cell selection in ambiguous cases. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Labels per token. aggregation_classifier (`torch.nn.Linear`): Aggregation head Returns: aggregate_mask (`torch.FloatTensor` of shape `(batch_size,)`): A mask set to 1 for examples that should use aggregation functions. """ # torch.FloatTensor(batch_size,) aggregate_mask_init = torch.logical_not(torch.isnan(answer)).type(torch.FloatTensor).to(answer.device) logits_aggregation = aggregation_classifier(pooled_output) dist_aggregation = torch.distributions.categorical.Categorical(logits=logits_aggregation) # Index 0 corresponds to "no aggregation". aggregation_ops_total_mass = torch.sum(dist_aggregation.probs[:, 1:], dim=1) # Cell selection examples according to current model. is_pred_cell_selection = aggregation_ops_total_mass <= cell_selection_preference # Examples with non-empty cell selection supervision. is_cell_supervision_available = torch.sum(labels, dim=1) > 0 # torch.where is not equivalent to tf.where (in tensorflow 1) # hence the added .view on the condition to match the shape of the first tensor aggregate_mask = torch.where( torch.logical_and(is_pred_cell_selection, is_cell_supervision_available).view(aggregate_mask_init.size()), torch.zeros_like(aggregate_mask_init, dtype=torch.float32), aggregate_mask_init, ) aggregate_mask = aggregate_mask.detach() return aggregate_mask def _calculate_aggregation_loss_known( logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels ): """ Calculates aggregation loss when its type is known during training. In the weakly supervised setting, the only known information is that for cell selection examples, "no aggregation" should be predicted. For other examples (those that require aggregation), no loss is accumulated. In the setting where aggregation type is always known, standard cross entropy loss is accumulated for all examples Args: logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. aggregate_mask (`torch.FloatTensor` of shape `(batch_size, )`): A mask set to 1 for examples that should use aggregation functions. aggregation_labels (`torch.LongTensor` of shape `(batch_size, )`): Aggregation function id for every example in the batch. use_answer_as_supervision (`bool`, *optional*): Whether to use the answer as the only supervision for aggregation examples. num_aggregation_labels (`int`, *optional*, defaults to 0): The number of aggregation operators to predict. Returns: aggregation_loss_known (`torch.FloatTensor` of shape `(batch_size,)`): Aggregation loss (when its type is known during training) per example. """ if use_answer_as_supervision: # Prepare "no aggregation" targets for cell selection examples. target_aggregation = torch.zeros_like(aggregate_mask, dtype=torch.long) else: # Use aggregation supervision as the target. 
target_aggregation = aggregation_labels one_hot_labels = nn.functional.one_hot(target_aggregation, num_classes=num_aggregation_labels).type(torch.float32) log_probs = nn.functional.log_softmax(logits_aggregation, dim=-1) # torch.FloatTensor[batch_size] per_example_aggregation_intermediate = -torch.sum(one_hot_labels * log_probs, dim=-1) if use_answer_as_supervision: # Accumulate loss only for examples requiring cell selection # (no aggregation). return per_example_aggregation_intermediate * (1 - aggregate_mask) else: return per_example_aggregation_intermediate def _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask): """ Calculates aggregation loss in the case of answer supervision. Args: logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. aggregate_mask (`torch.FloatTensor` of shape `(batch_size, )`): A mask set to 1 for examples that should use aggregation functions Returns: aggregation_loss_unknown (`torch.FloatTensor` of shape `(batch_size,)`): Aggregation loss (in case of answer supervision) per example. """ dist_aggregation = torch.distributions.categorical.Categorical(logits=logits_aggregation) # Index 0 corresponds to "no aggregation". aggregation_ops_total_mass = torch.sum(dist_aggregation.probs[:, 1:], dim=1) # Predict some aggregation in case of an answer that needs aggregation. # This increases the probability of all aggregation functions, in a way # similar to MML, but without considering whether the function gives the # correct answer. return -torch.log(aggregation_ops_total_mass) * aggregate_mask def _calculate_aggregation_loss( logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels, aggregation_loss_weight, ): """ Calculates the aggregation loss per example. Args: logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. aggregate_mask (`torch.FloatTensor` of shape `(batch_size, )`): A mask set to 1 for examples that should use aggregation functions. aggregation_labels (`torch.LongTensor` of shape `(batch_size, )`): Aggregation function id for every example in the batch. use_answer_as_supervision (`bool`, *optional*): Whether to use the answer as the only supervision for aggregation examples. num_aggregation_labels (`int`, *optional*, defaults to 0): The number of aggregation operators to predict. aggregation_loss_weight (`float`, *optional*, defaults to 1.0): Importance weight for the aggregation loss. Returns: aggregation_loss (`torch.FloatTensor` of shape `(batch_size,)`): Aggregation loss per example. """ per_example_aggregation_loss = _calculate_aggregation_loss_known( logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels ) if use_answer_as_supervision: # Add aggregation loss for numeric answers that need aggregation. per_example_aggregation_loss += _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask) return aggregation_loss_weight * per_example_aggregation_loss def _calculate_expected_result( dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config ): """ Calculates the expected result given cell and aggregation probabilities. Args: dist_per_cell (`torch.distributions.Bernoulli`): Cell selection distribution for each cell. numeric_values (`torch.FloatTensor` of shape `(batch_size, seq_length)`): Numeric values of every token. 
Nan for tokens which are not numeric values. numeric_values_scale (`torch.FloatTensor` of shape `(batch_size, seq_length)`): Scale of the numeric values of every token. input_mask_float (`torch.FloatTensor` of shape `(batch_size, seq_length)`): Mask for the table, without question tokens and table headers. logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. config ([`TapasConfig`]): Model configuration class with all the hyperparameters of the model Returns: expected_result (`torch.FloatTensor` of shape `(batch_size,)`): The expected result per example. """ if config.use_gumbel_for_cells: gumbel_dist = torch.distributions.RelaxedBernoulli( # The token logits where already divided by the temperature and used for # computing cell selection errors so we need to multiply it again here temperature=config.temperature, logits=dist_per_cell.logits * config.temperature, ) scaled_probability_per_cell = gumbel_dist.sample() else: scaled_probability_per_cell = dist_per_cell.probs # <float32>[batch_size, seq_length] scaled_probability_per_cell = (scaled_probability_per_cell / numeric_values_scale) * input_mask_float count_result = torch.sum(scaled_probability_per_cell, dim=1) numeric_values_masked = torch.where( torch.isnan(numeric_values), torch.zeros_like(numeric_values), numeric_values ) # Mask non-numeric table values to zero. sum_result = torch.sum(scaled_probability_per_cell * numeric_values_masked, dim=1) avg_approximation = config.average_approximation_function if avg_approximation == AverageApproximationFunction.RATIO: average_result = sum_result / (count_result + EPSILON_ZERO_DIVISION) elif avg_approximation == AverageApproximationFunction.FIRST_ORDER: # The sum of all probabilities except that correspond to other cells # Ex here stands for expectation, more explicitly the expectation of the sum of N-1 Bernoulli random variables plus # the constant 1, which is computed as adding all N expected values and subtracting the extra one. It corresponds to X_c # in Appendix D of the original TAPAS paper which is trying to approximate the average of a random set. 
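# i.e. ex[j] = 1 + sum over k != j of p[k]: the expected number of selected cells when cell j is forced to be included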
ex = torch.sum(scaled_probability_per_cell, dim=1, keepdim=True) - scaled_probability_per_cell + 1 average_result = torch.sum(numeric_values_masked * scaled_probability_per_cell / ex, dim=1) elif avg_approximation == AverageApproximationFunction.SECOND_ORDER: # The sum of all probabilities except those that correspond to other cells ex = torch.sum(scaled_probability_per_cell, dim=1, keepdim=True) - scaled_probability_per_cell + 1 pointwise_var = scaled_probability_per_cell * (1 - scaled_probability_per_cell) var = torch.sum(pointwise_var, dim=1, keepdim=True) - pointwise_var multiplier = (var / torch.square(ex) + 1) / ex average_result = torch.sum(numeric_values_masked * scaled_probability_per_cell * multiplier, dim=1) else: raise ValueError(f"Invalid average_approximation_function: {config.average_approximation_function}") if config.use_gumbel_for_aggregation: gumbel_dist = torch.distributions.RelaxedOneHotCategorical( config.aggregation_temperature, logits=logits_aggregation[:, 1:] ) # <float32>[batch_size, num_aggregation_labels - 1] aggregation_op_only_probs = gumbel_dist.sample() else: # <float32>[batch_size, num_aggregation_labels - 1] aggregation_op_only_probs = nn.functional.softmax( logits_aggregation[:, 1:] / config.aggregation_temperature, dim=-1 ) all_results = torch.cat( [ torch.unsqueeze(sum_result, dim=1), torch.unsqueeze(average_result, dim=1), torch.unsqueeze(count_result, dim=1), ], dim=1, ) expected_result = torch.sum(all_results * aggregation_op_only_probs, dim=1) return expected_result # PyTorch does not currently support Huber loss with custom delta so we define it ourselves def huber_loss(input, target, delta: float = 1.0): errors = torch.abs(input - target) # shape (batch_size,) return torch.where(errors < delta, 0.5 * errors**2, errors * delta - (0.5 * delta**2)) def _calculate_regression_loss( answer, aggregate_mask, dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config, ): """ Calculates the regression loss per example. Args: answer (`torch.FloatTensor` of shape `(batch_size,)`): Answer for every example in the batch. Nan if there is no scalar answer. aggregate_mask (`torch.FloatTensor` of shape `(batch_size,)`): A mask set to 1 for examples that should use aggregation functions. dist_per_cell (`torch.distributions.Bernoulli`): Cell selection distribution for each cell. numeric_values (`torch.FloatTensor` of shape `(batch_size, seq_length)`): Numeric values of every token. Nan for tokens which are not numeric values. numeric_values_scale (`torch.FloatTensor` of shape `(batch_size, seq_length)`): Scale of the numeric values of every token. input_mask_float (`torch.FloatTensor` of shape `(batch_size, seq_length)`): Mask for the table, without question tokens and table headers. logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. config ([`TapasConfig`]): Model configuration class with all the parameters of the model Returns: per_example_answer_loss_scaled (`torch.FloatTensor` of shape `(batch_size,)`): Scaled answer loss for each example in the batch. large_answer_loss_mask (`torch.FloatTensor` of shape `(batch_size,)`): A mask which is 1 for examples whose answer loss is larger than the answer_loss_cutoff.
""" # float32 (batch_size,) expected_result = _calculate_expected_result( dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config ) # float32 (batch_size,) answer_masked = torch.where(torch.isnan(answer), torch.zeros_like(answer), answer) if config.use_normalized_answer_loss: normalizer = (torch.max(torch.abs(expected_result), torch.abs(answer_masked)) + EPSILON_ZERO_DIVISION).detach() normalized_answer_masked = answer_masked / normalizer normalized_expected_result = expected_result / normalizer per_example_answer_loss = huber_loss( normalized_expected_result * aggregate_mask, normalized_answer_masked * aggregate_mask ) else: per_example_answer_loss = huber_loss( expected_result * aggregate_mask, answer_masked * aggregate_mask, delta=config.huber_loss_delta ) if config.answer_loss_cutoff is None: large_answer_loss_mask = torch.ones_like(per_example_answer_loss, dtype=torch.float32) else: large_answer_loss_mask = torch.where( per_example_answer_loss > config.answer_loss_cutoff, torch.zeros_like(per_example_answer_loss, dtype=torch.float32), torch.ones_like(per_example_answer_loss, dtype=torch.float32), ) per_example_answer_loss_scaled = config.answer_loss_importance * (per_example_answer_loss * aggregate_mask) return per_example_answer_loss_scaled, large_answer_loss_mask
# coding=utf-8 # Copyright 2020 Google Research and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch TAPAS model.""" import enum import math import os from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, SequenceClassifierOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import ( apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, torch_int_div, ) from ...utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_tapas import TapasConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "TapasConfig" _CHECKPOINT_FOR_DOC = "google/tapas-base" TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST = [ # large models "google/tapas-large", "google/tapas-large-finetuned-sqa", "google/tapas-large-finetuned-wtq", "google/tapas-large-finetuned-wikisql-supervised", "google/tapas-large-finetuned-tabfact", # base models "google/tapas-base", "google/tapas-base-finetuned-sqa", "google/tapas-base-finetuned-wtq", "google/tapas-base-finetuned-wikisql-supervised", "google/tapas-base-finetuned-tabfact", # small models "google/tapas-small", "google/tapas-small-finetuned-sqa", "google/tapas-small-finetuned-wtq", "google/tapas-small-finetuned-wikisql-supervised", "google/tapas-small-finetuned-tabfact", # mini models "google/tapas-mini", "google/tapas-mini-finetuned-sqa", "google/tapas-mini-finetuned-wtq", "google/tapas-mini-finetuned-wikisql-supervised", "google/tapas-mini-finetuned-tabfact", # tiny models "google/tapas-tiny", "google/tapas-tiny-finetuned-sqa", "google/tapas-tiny-finetuned-wtq", "google/tapas-tiny-finetuned-wikisql-supervised", "google/tapas-tiny-finetuned-tabfact", # See all TAPAS models at https://huggingface.co/models?filter=tapas ] EPSILON_ZERO_DIVISION = 1e-10 CLOSE_ENOUGH_TO_LOG_ZERO = -10000.0 @dataclass class TableQuestionAnsweringOutput(ModelOutput): """ Output type of [`TapasForQuestionAnswering`]. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` (and possibly `answer`, `aggregation_labels`, `numeric_values` and `numeric_values_scale` are provided)): Total loss as the sum of the hierarchical cell selection log-likelihood loss and (optionally) the semi-supervised regression loss and (optionally) supervised loss for aggregations. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Prediction scores of the cell selection head, for every token. logits_aggregation (`torch.FloatTensor`, *optional*, of shape `(batch_size, num_aggregation_labels)`): Prediction scores of the aggregation head, for every aggregation operator. 
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None logits_aggregation: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None def load_tf_weights_in_tapas(model, config, tf_checkpoint_path): """ Load tf checkpoints in a PyTorch model. This is an adaptation from load_tf_weights_in_bert - add cell selection and aggregation heads - take into account additional token type embedding layers """ try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." ) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info(f"Converting TensorFlow checkpoint from {tf_path}") # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info(f"Loading TF weight {name} with shape {shape}") array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v # which are not required for using pretrained model if any( n in [ "adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step", "seq_relationship", ] for n in name ): logger.info(f"Skipping {'/'.join(name)}") continue # in case the model is TapasForSequenceClassification, we skip output_bias and output_weights # since these are not used for classification if isinstance(model, TapasForSequenceClassification): if any(n in ["output_bias", "output_weights"] for n in name): logger.info(f"Skipping {'/'.join(name)}") continue # in case the model is TapasModel, we skip output_bias, output_weights, output_bias_cls and output_weights_cls # since this model does not have MLM and NSP heads if isinstance(model, TapasModel): if any(n in ["output_bias", "output_weights", "output_bias_cls", "output_weights_cls"] for n in name): logger.info(f"Skipping {'/'.join(name)}") continue # in case the model is TapasForMaskedLM, we skip the pooler if isinstance(model, TapasForMaskedLM): if any(n in ["pooler"] for n in name): logger.info(f"Skipping {'/'.join(name)}") continue # if first scope name starts with "bert", change it to "tapas" if name[0] == "bert": name[0] = "tapas" pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == "gamma": pointer = getattr(pointer, "weight") elif scope_names[0] == "beta": 
pointer = getattr(pointer, "bias") # cell selection heads elif scope_names[0] == "output_bias": if not isinstance(model, TapasForMaskedLM): pointer = getattr(pointer, "output_bias") else: pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights": pointer = getattr(pointer, "output_weights") elif scope_names[0] == "column_output_bias": pointer = getattr(pointer, "column_output_bias") elif scope_names[0] == "column_output_weights": pointer = getattr(pointer, "column_output_weights") # aggregation head elif scope_names[0] == "output_bias_agg": pointer = getattr(pointer, "aggregation_classifier") pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights_agg": pointer = getattr(pointer, "aggregation_classifier") pointer = getattr(pointer, "weight") # classification head elif scope_names[0] == "output_bias_cls": pointer = getattr(pointer, "classifier") pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights_cls": pointer = getattr(pointer, "classifier") pointer = getattr(pointer, "weight") else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info(f"Skipping {'/'.join(name)}") continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == "_embeddings": pointer = getattr(pointer, "weight") elif m_name[-13:] in [f"_embeddings_{i}" for i in range(7)]: pointer = getattr(pointer, "weight") elif m_name == "kernel": array = np.transpose(array) try: if pointer.shape != array.shape: raise ValueError(f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched") except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info(f"Initialize PyTorch weight {name}") # Added a check to see whether the array is a scalar (because bias terms in Tapas checkpoints can be # scalar => should first be converted to numpy arrays) if np.isscalar(array): array = np.array(array) pointer.data = torch.from_numpy(array) return model class TapasEmbeddings(nn.Module): """ Construct the embeddings from word, position and token_type embeddings. Same as BertEmbeddings but with a number of additional token type embeddings to encode tabular structure. 
""" def __init__(self, config): super().__init__() # we do not include config.disabled_features and config.disable_position_embeddings from the original implementation # word embeddings self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) # position embeddings self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) # token type embeddings for i, type_vocab_sizes in enumerate(config.type_vocab_sizes): name = f"token_type_embeddings_{i}" setattr(self, name, nn.Embedding(type_vocab_sizes, config.hidden_size)) self.number_of_token_type_embeddings = len(config.type_vocab_sizes) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.config = config def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] device = input_ids.device if input_ids is not None else inputs_embeds.device if position_ids is None: # create absolute position embeddings position_ids = torch.arange(seq_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0).expand(input_shape) # when self.config.reset_position_index_per_cell is set to True, create relative position embeddings if self.config.reset_position_index_per_cell: # shape (batch_size, seq_len) col_index = IndexMap(token_type_ids[:, :, 1], self.config.type_vocab_sizes[1], batch_dims=1) # shape (batch_size, seq_len) row_index = IndexMap(token_type_ids[:, :, 2], self.config.type_vocab_sizes[2], batch_dims=1) # shape (batch_size, seq_len) full_index = ProductIndexMap(col_index, row_index) # shape (max_rows * max_columns,). First absolute position for every cell first_position_per_segment = reduce_min(position_ids, full_index)[0] # ? shape (batch_size, seq_len). 
First absolute position of the cell for every token first_position = gather(first_position_per_segment, full_index) # shape (1, seq_len) position = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0) position_ids = torch.min( torch.as_tensor(self.config.max_position_embeddings - 1, device=device), position - first_position ) if token_type_ids is None: token_type_ids = torch.zeros( (input_shape + self.number_of_token_type_embeddings), dtype=torch.long, device=device ) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) embeddings = inputs_embeds + position_embeddings for i in range(self.number_of_token_type_embeddings): name = f"token_type_embeddings_{i}" embeddings += getattr(self, name)(token_type_ids[:, :, i]) embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class TapasSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size {config.hidden_size} is not a multiple of the number of attention " f"heads {config.num_attention_heads}" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.is_decoder = config.is_decoder def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. 
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TapasModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput class TapasSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class TapasAttention(nn.Module): def __init__(self, config): super().__init__() self.self = TapasSelfAttention(config) self.output = TapasSelfOutput(config) self.pruned_heads = set() # Copied from transformers.models.bert.modeling_bert.BertAttention.prune_heads def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) # Copied from transformers.models.bert.modeling_bert.BertAttention.forward def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from 
transformers.models.bert.modeling_bert.BertIntermediate class TapasIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput class TapasOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class TapasLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = TapasAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = TapasAttention(config) self.intermediate = TapasIntermediate(config) self.output = TapasOutput(config) # Copied from transformers.models.bert.modeling_bert.BertLayer.forward def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + 
cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs # Copied from transformers.models.bert.modeling_bert.BertLayer.feed_forward_chunk def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class TapasEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([TapasLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_values, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_values, output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) # Copied from transformers.models.bert.modeling_bert.BertPooler class TapasPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
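        # For example, hidden_states of shape (batch_size, seq_length, hidden_size) is reduced to
        # (batch_size, hidden_size) by keeping only the first position (the [CLS] token in TAPAS inputs)
        # before applying the dense layer and tanh activation.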
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output # Copied from transformers.models.bert.modeling_bert.BertPredictionHeadTransform with Bert->Tapas class TapasPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->Tapas class TapasLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = TapasPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = self.decoder(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOnlyMLMHead with Bert->Tapas class TapasOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = TapasLMPredictionHead(config) def forward(self, sequence_output: torch.Tensor) -> torch.Tensor: prediction_scores = self.predictions(sequence_output) return prediction_scores class TapasPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = TapasConfig base_model_prefix = "tapas" supports_gradient_checkpointing = True # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, TapasEncoder): module.gradient_checkpointing = value TAPAS_START_DOCSTRING = r""" This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. 
Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`TapasConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ TAPAS_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `({0}, 7)`, *optional*): Token indices that encode tabular structure. Indices can be obtained using [`AutoTokenizer`]. See this class for more info. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. If `reset_position_index_per_cell` of [`TapasConfig`] is set to `True`, relative position embeddings will be used. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare Tapas Model transformer outputting raw hidden-states without any specific head on top.", TAPAS_START_DOCSTRING, ) class TapasModel(TapasPreTrainedModel): """ This class is a small change compared to [`BertModel`], taking into account the additional token type ids. The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in [Attention is all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. 
""" def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = TapasEmbeddings(config) self.encoder = TapasEncoder(config) self.pooler = TapasPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: Examples: ```python >>> from transformers import AutoTokenizer, TapasModel >>> import pandas as pd >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base") >>> model = TapasModel.from_pretrained("google/tapas-base") >>> data = { ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... "Age": ["56", "45", "59"], ... "Number of movies": ["87", "53", "69"], ... } >>> table = pd.DataFrame.from_dict(data) >>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"] >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros( (*input_shape, len(self.config.type_vocab_sizes)), dtype=torch.long, device=device ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@add_start_docstrings("""Tapas Model with a `language modeling` head on top.""", TAPAS_START_DOCSTRING)
class TapasForMaskedLM(TapasPreTrainedModel):
    _keys_to_ignore_on_load_missing = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]
    config_class = TapasConfig
    base_model_prefix = "tapas"

    def __init__(self, config):
        super().__init__(config)

        self.tapas = TapasModel(config, add_pooling_layer=False)
        self.cls = TapasOnlyMLMHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple, MaskedLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the
masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` Returns: Examples: ```python >>> from transformers import AutoTokenizer, TapasForMaskedLM >>> import pandas as pd >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base") >>> model = TapasForMaskedLM.from_pretrained("google/tapas-base") >>> data = { ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... "Age": ["56", "45", "59"], ... "Number of movies": ["87", "53", "69"], ... } >>> table = pd.DataFrame.from_dict(data) >>> inputs = tokenizer( ... table=table, queries="How many [MASK] has George [MASK] played in?", return_tensors="pt" ... ) >>> labels = tokenizer( ... table=table, queries="How many movies has George Clooney played in?", return_tensors="pt" ... )["input_ids"] >>> outputs = model(**inputs, labels=labels) >>> logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.tapas( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Tapas Model with a cell selection head and optional aggregation head on top for question-answering tasks on tables (linear layers on top of the hidden-states output to compute `logits` and optional `logits_aggregation`), e.g. for SQA, WTQ or WikiSQL-supervised tasks. """, TAPAS_START_DOCSTRING, ) class TapasForQuestionAnswering(TapasPreTrainedModel): def __init__(self, config: TapasConfig): super().__init__(config) # base model self.tapas = TapasModel(config) # dropout (only used when training) self.dropout = nn.Dropout(config.hidden_dropout_prob) # cell selection heads if config.init_cell_selection_weights_to_zero: # init_cell_selection_weights_to_zero: Whether the initial weights should be # set to 0. This ensures that all tokens have the same prior probability. 
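            # Concretely (see compute_token_logits below): with zero weights and a zero bias every token
            # starts with logit (0 + 0) / temperature = 0, i.e. an initial selection probability of 0.5,
            # so no token is preferred before training.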
self.output_weights = nn.Parameter(torch.zeros(config.hidden_size)) self.column_output_weights = nn.Parameter(torch.zeros(config.hidden_size)) else: self.output_weights = nn.Parameter(torch.empty(config.hidden_size)) nn.init.normal_( self.output_weights, std=config.initializer_range ) # here, a truncated normal is used in the original implementation self.column_output_weights = nn.Parameter(torch.empty(config.hidden_size)) nn.init.normal_( self.column_output_weights, std=config.initializer_range ) # here, a truncated normal is used in the original implementation self.output_bias = nn.Parameter(torch.zeros([])) self.column_output_bias = nn.Parameter(torch.zeros([])) # aggregation head if config.num_aggregation_labels > 0: self.aggregation_classifier = nn.Linear(config.hidden_size, config.num_aggregation_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TableQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, table_mask: Optional[torch.LongTensor] = None, labels: Optional[torch.LongTensor] = None, aggregation_labels: Optional[torch.LongTensor] = None, float_answer: Optional[torch.FloatTensor] = None, numeric_values: Optional[torch.FloatTensor] = None, numeric_values_scale: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TableQuestionAnsweringOutput]: r""" table_mask (`torch.LongTensor` of shape `(batch_size, seq_length)`, *optional*): Mask for the table. Indicates which tokens belong to the table (1). Question tokens, table headers and padding are 0. labels (`torch.LongTensor` of shape `(batch_size, seq_length)`, *optional*): Labels per token for computing the hierarchical cell selection loss. This encodes the positions of the answer appearing in the table. Can be obtained using [`AutoTokenizer`]. - 1 for tokens that are **part of the answer**, - 0 for tokens that are **not part of the answer**. aggregation_labels (`torch.LongTensor` of shape `(batch_size, )`, *optional*): Aggregation function index for every example in the batch for computing the aggregation loss. Indices should be in `[0, ..., config.num_aggregation_labels - 1]`. Only required in case of strong supervision for aggregation (WikiSQL-supervised). float_answer (`torch.FloatTensor` of shape `(batch_size, )`, *optional*): Float answer for every example in the batch. Set to *float('nan')* for cell selection questions. Only required in case of weak supervision (WTQ) to calculate the aggregate mask and regression loss. numeric_values (`torch.FloatTensor` of shape `(batch_size, seq_length)`, *optional*): Numeric values of every token, NaN for tokens which are not numeric values. Can be obtained using [`AutoTokenizer`]. Only required in case of weak supervision for aggregation (WTQ) to calculate the regression loss. numeric_values_scale (`torch.FloatTensor` of shape `(batch_size, seq_length)`, *optional*): Scale of the numeric values of every token. Can be obtained using [`AutoTokenizer`]. 
Only required in case of weak supervision for aggregation (WTQ) to calculate the regression loss. Returns: Examples: ```python >>> from transformers import AutoTokenizer, TapasForQuestionAnswering >>> import pandas as pd >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base-finetuned-wtq") >>> model = TapasForQuestionAnswering.from_pretrained("google/tapas-base-finetuned-wtq") >>> data = { ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... "Age": ["56", "45", "59"], ... "Number of movies": ["87", "53", "69"], ... } >>> table = pd.DataFrame.from_dict(data) >>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"] >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> logits_aggregation = outputs.logits_aggregation ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.tapas( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] pooled_output = outputs[1] sequence_output = self.dropout(sequence_output) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] device = input_ids.device if input_ids is not None else inputs_embeds.device # Construct indices for the table. if token_type_ids is None: token_type_ids = torch.zeros( (*input_shape, len(self.config.type_vocab_sizes)), dtype=torch.long, device=device ) token_types = [ "segment_ids", "column_ids", "row_ids", "prev_labels", "column_ranks", "inv_column_ranks", "numeric_relations", ] row_ids = token_type_ids[:, :, token_types.index("row_ids")] column_ids = token_type_ids[:, :, token_types.index("column_ids")] row_index = IndexMap( indices=torch.min(row_ids, torch.as_tensor(self.config.max_num_rows - 1, device=row_ids.device)), num_segments=self.config.max_num_rows, batch_dims=1, ) col_index = IndexMap( indices=torch.min(column_ids, torch.as_tensor(self.config.max_num_columns - 1, device=column_ids.device)), num_segments=self.config.max_num_columns, batch_dims=1, ) cell_index = ProductIndexMap(row_index, col_index) # Masks. input_shape = input_ids.size() if input_ids is not None else inputs_embeds.size()[:-1] device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) # Table cells only, without question tokens and table headers. if table_mask is None: table_mask = torch.where(row_ids > 0, torch.ones_like(row_ids), torch.zeros_like(row_ids)) # torch.FloatTensor[batch_size, seq_length] input_mask_float = attention_mask.float().to(device) table_mask_float = table_mask.float().to(device) # Mask for cells that exist in the table (i.e. that are not padding). cell_mask, _ = reduce_mean(input_mask_float, cell_index) # Compute logits per token. These are used to select individual cells. logits = compute_token_logits(sequence_output, self.config.temperature, self.output_weights, self.output_bias) # Compute logits per column. These are used to select a column. 
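        # (Only computed when config.select_one_column is True; compute_column_logits, defined further
        # below, averages the token logits per cell and then per column.)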
column_logits = None if self.config.select_one_column: column_logits = compute_column_logits( sequence_output, self.column_output_weights, self.column_output_bias, cell_index, cell_mask, self.config.allow_empty_column_selection, ) # Aggregation logits logits_aggregation = None if self.config.num_aggregation_labels > 0: logits_aggregation = self.aggregation_classifier(pooled_output) # Total loss calculation total_loss = 0.0 calculate_loss = False if labels is not None: calculate_loss = True is_supervised = not self.config.num_aggregation_labels > 0 or not self.config.use_answer_as_supervision # Semi-supervised cell selection in case of no aggregation: # If the answer (the denotation) appears directly in the table we might # select the answer without applying any aggregation function. There are # some ambiguous cases, see utils._calculate_aggregate_mask for more info. # `aggregate_mask` is 1 for examples where we chose to aggregate and 0 # for examples where we chose to select the answer directly. # `labels` encodes the positions of the answer appearing in the table. if is_supervised: aggregate_mask = None else: if float_answer is not None: assert ( labels.shape[0] == float_answer.shape[0] ), "Make sure the answers are a FloatTensor of shape (batch_size,)" # <float32>[batch_size] aggregate_mask = _calculate_aggregate_mask( float_answer, pooled_output, self.config.cell_selection_preference, labels, self.aggregation_classifier, ) else: raise ValueError("You have to specify float answers in order to calculate the aggregate mask") # Cell selection log-likelihood if self.config.average_logits_per_cell: logits_per_cell, _ = reduce_mean(logits, cell_index) logits = gather(logits_per_cell, cell_index) dist_per_token = torch.distributions.Bernoulli(logits=logits) # Compute cell selection loss per example. selection_loss_per_example = None if not self.config.select_one_column: weight = torch.where( labels == 0, torch.ones_like(labels, dtype=torch.float32), self.config.positive_label_weight * torch.ones_like(labels, dtype=torch.float32), ) selection_loss_per_token = -dist_per_token.log_prob(labels) * weight selection_loss_per_example = torch.sum(selection_loss_per_token * input_mask_float, dim=1) / ( torch.sum(input_mask_float, dim=1) + EPSILON_ZERO_DIVISION ) else: selection_loss_per_example, logits = _single_column_cell_selection_loss( logits, column_logits, labels, cell_index, col_index, cell_mask ) dist_per_token = torch.distributions.Bernoulli(logits=logits) # Supervised cell selection if self.config.disable_per_token_loss: pass elif is_supervised: total_loss += torch.mean(selection_loss_per_example) else: # For the not supervised case, do not assign loss for cell selection total_loss += torch.mean(selection_loss_per_example * (1.0 - aggregate_mask)) # Semi-supervised regression loss and supervised loss for aggregations if self.config.num_aggregation_labels > 0: if is_supervised: # Note that `aggregate_mask` is None if the setting is supervised. 
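                    # In this strongly supervised setting the gold aggregation operator is known, so
                    # _calculate_aggregation_loss_known (defined below) reduces to a standard cross
                    # entropy against `aggregation_labels` for every example.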
if aggregation_labels is not None: assert ( labels.shape[0] == aggregation_labels.shape[0] ), "Make sure the aggregation labels are a LongTensor of shape (batch_size,)" per_example_additional_loss = _calculate_aggregation_loss( logits_aggregation, aggregate_mask, aggregation_labels, self.config.use_answer_as_supervision, self.config.num_aggregation_labels, self.config.aggregation_loss_weight, ) else: raise ValueError( "You have to specify aggregation labels in order to calculate the aggregation loss" ) else: # Set aggregation labels to zeros aggregation_labels = torch.zeros(labels.shape[0], dtype=torch.long, device=labels.device) per_example_additional_loss = _calculate_aggregation_loss( logits_aggregation, aggregate_mask, aggregation_labels, self.config.use_answer_as_supervision, self.config.num_aggregation_labels, self.config.aggregation_loss_weight, ) if self.config.use_answer_as_supervision: if numeric_values is not None and numeric_values_scale is not None: assert numeric_values.shape == numeric_values_scale.shape # Add regression loss for numeric answers which require aggregation. answer_loss, large_answer_loss_mask = _calculate_regression_loss( float_answer, aggregate_mask, dist_per_token, numeric_values, numeric_values_scale, table_mask_float, logits_aggregation, self.config, ) per_example_additional_loss += answer_loss # Zero loss for examples with answer_loss > cutoff. per_example_additional_loss *= large_answer_loss_mask else: raise ValueError( "You have to specify numeric values and numeric values scale in order to calculate the" " regression loss" ) total_loss += torch.mean(per_example_additional_loss) else: # if no label ids are provided, set them to zeros in order to properly compute logits labels = torch.zeros_like(logits) _, logits = _single_column_cell_selection_loss( logits, column_logits, labels, cell_index, col_index, cell_mask ) if not return_dict: output = (logits, logits_aggregation) + outputs[2:] return ((total_loss,) + output) if calculate_loss else output return TableQuestionAnsweringOutput( loss=total_loss if calculate_loss else None, logits=logits, logits_aggregation=logits_aggregation, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Tapas Model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for table entailment tasks, such as TabFact (Chen et al., 2020). 
""", TAPAS_START_DOCSTRING, ) class TapasForSequenceClassification(TapasPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.tapas = TapasModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). Note: this is called "classification_class_index" in the original implementation. Returns: Examples: ```python >>> from transformers import AutoTokenizer, TapasForSequenceClassification >>> import torch >>> import pandas as pd >>> tokenizer = AutoTokenizer.from_pretrained("google/tapas-base-finetuned-tabfact") >>> model = TapasForSequenceClassification.from_pretrained("google/tapas-base-finetuned-tabfact") >>> data = { ... "Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... "Age": ["56", "45", "59"], ... "Number of movies": ["87", "53", "69"], ... } >>> table = pd.DataFrame.from_dict(data) >>> queries = [ ... "There is only one actor who is 45 years old", ... "There are 3 actors which played in more than 60 movies", ... 
] >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt") >>> labels = torch.tensor([1, 0]) # 1 means entailed, 0 means refuted >>> outputs = model(**inputs, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.tapas( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.num_labels == 1: self.config.problem_type = "regression" elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) """ TAPAS utilities.""" class AverageApproximationFunction(str, enum.Enum): RATIO = "ratio" FIRST_ORDER = "first_order" SECOND_ORDER = "second_order" # Beginning of everything related to segmented tensors class IndexMap(object): """Index grouping entries within a tensor.""" def __init__(self, indices, num_segments, batch_dims=0): """ Creates an index Args: indices (`torch.LongTensor`, same shape as a *values* Tensor to which the indices refer): Tensor containing the indices. num_segments (`torch.LongTensor`): Scalar tensor, the number of segments. All elements in a batched segmented tensor must have the same number of segments (although many segments can be empty). batch_dims (`int`, *optional*, defaults to 0): The number of batch dimensions. The first *batch_dims* dimensions of a SegmentedTensor are treated as batch dimensions. Segments in different batch elements are always distinct even if they have the same index. """ self.indices = torch.as_tensor(indices) self.num_segments = torch.as_tensor(num_segments, device=indices.device) self.batch_dims = batch_dims def batch_shape(self): return self.indices.size()[: self.batch_dims] # returns a torch.Size object class ProductIndexMap(IndexMap): """The product of two indices.""" def __init__(self, outer_index, inner_index): """ Combines indices i and j into pairs (i, j). The result is an index where each segment (i, j) is the intersection of segments i and j. For example if the inputs represent table cells indexed by respectively rows and columns the output will be a table indexed by (row, column) pairs, i.e. by cell. The implementation combines indices {0, .., n - 1} and {0, .., m - 1} into {0, .., nm - 1}. 
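        For example, using the default TAPAS table size of 64 rows (outer index) and 32 columns (inner
        index), the cell in row i and column j is mapped to the combined index i * 32 + j.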
The output has *num_segments* equal to *outer_index.num_segments* * *inner_index.num_segments* Args: outer_index (`IndexMap`): IndexMap. inner_index (`IndexMap`): IndexMap, must have the same shape as *outer_index*. """ if outer_index.batch_dims != inner_index.batch_dims: raise ValueError("outer_index.batch_dims and inner_index.batch_dims must be the same.") super().__init__( indices=(inner_index.indices + outer_index.indices * inner_index.num_segments), num_segments=inner_index.num_segments * outer_index.num_segments, batch_dims=inner_index.batch_dims, ) self.outer_index = outer_index self.inner_index = inner_index def project_outer(self, index): """Projects an index with the same index set onto the outer components.""" indices = torch_int_div(index.indices, self.inner_index.num_segments).type(torch.long) return IndexMap(indices=indices, num_segments=self.outer_index.num_segments, batch_dims=index.batch_dims) def project_inner(self, index): """Projects an index with the same index set onto the inner components.""" return IndexMap( indices=torch.fmod(index.indices, self.inner_index.num_segments) .type(torch.float) .floor() .type(torch.long), num_segments=self.inner_index.num_segments, batch_dims=index.batch_dims, ) def gather(values, index, name="segmented_gather"): """ Gathers from *values* using the index map. For each element in the domain of the index map this operation looks up a value for that index in *values*. Two elements from the same segment always get assigned the same value. Args: values (`torch.Tensor` of shape (B1, ..., Bn, num_segments, V1, ...)): Tensor with segment values. index (`IndexMap` of shape (B1, ..., Bn, I1, ..., Ik)): IndexMap. name (`str`, *optional*, defaults to 'segmented_gather'): Name for the operation. Currently not used Returns: `tuple(torch.Tensor)`: Tensor of shape (B1, ..., Bn, I1, ..., Ik, V1, ...) with the gathered values. """ indices = index.indices # first, check whether the indices of the index represent scalar values (i.e. not vectorized) if len(values.shape[index.batch_dims :]) < 2: return torch.gather( values, index.batch_dims, indices.view( values.size()[0], -1 ), # torch.gather expects index to have the same number of dimensions as values ).view(indices.size()) else: # this means we have a vectorized version # we have to adjust the index indices = indices.unsqueeze(-1).expand(values.shape) return torch.gather(values, index.batch_dims, indices) def flatten(index, name="segmented_flatten"): """ Flattens a batched index map (which is typically of shape batch_size, seq_length) to a 1d index map. This operation relabels the segments to keep batch elements distinct. The k-th batch element will have indices shifted by *num_segments* * (k - 1). The result is a tensor with *num_segments* multiplied by the number of elements in the batch. Args: index (`IndexMap`): IndexMap to flatten. name (`str`, *optional*, defaults to 'segmented_flatten'): Name for the operation. Currently not used Returns: (`IndexMap`): The flattened IndexMap. """ # first, get batch_size as scalar tensor batch_size = torch.prod(torch.tensor(list(index.batch_shape()))) # next, create offset as 1-D tensor of length batch_size, # and multiply element-wise by num segments (to offset different elements in the batch) e.g. 
if batch size is 2: [0, 64] offset = torch.arange(start=0, end=batch_size, device=index.num_segments.device) * index.num_segments offset = offset.view(index.batch_shape()) for _ in range(index.batch_dims, len(index.indices.size())): # typically range(1,2) offset = offset.unsqueeze(-1) indices = offset + index.indices return IndexMap(indices=indices.view(-1), num_segments=index.num_segments * batch_size, batch_dims=0) def range_index_map(batch_shape, num_segments, name="range_index_map"): """ Constructs an index map equal to range(num_segments). Args: batch_shape (`torch.Size`): Batch shape num_segments (`int`): Number of segments name (`str`, *optional*, defaults to 'range_index_map'): Name for the operation. Currently not used Returns: (`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments). """ batch_shape = torch.as_tensor( batch_shape, dtype=torch.long ) # create a rank 1 tensor vector containing batch_shape (e.g. [2]) assert len(batch_shape.size()) == 1 num_segments = torch.as_tensor(num_segments) # create a rank 0 tensor (scalar) containing num_segments (e.g. 64) assert len(num_segments.size()) == 0 indices = torch.arange( start=0, end=num_segments, device=num_segments.device ) # create a rank 1 vector with num_segments elements new_tensor = torch.cat( [torch.ones_like(batch_shape, dtype=torch.long, device=num_segments.device), num_segments.unsqueeze(dim=0)], dim=0, ) # new_tensor is just a vector of [1 64] for example (assuming only 1 batch dimension) new_shape = [int(x) for x in new_tensor.tolist()] indices = indices.view(new_shape) multiples = torch.cat([batch_shape, torch.as_tensor([1])], dim=0) indices = indices.repeat(multiples.tolist()) # equivalent (in Numpy:) # indices = torch.as_tensor(np.tile(indices.numpy(), multiples.tolist())) return IndexMap(indices=indices, num_segments=num_segments, batch_dims=list(batch_shape.size())[0]) def _segment_reduce(values, index, segment_reduce_fn, name): """ Applies a segment reduction segment-wise. Args: values (`torch.Tensor`): Tensor with segment values. index (`IndexMap`): IndexMap. segment_reduce_fn (`str`): Name for the reduce operation. One of "sum", "mean", "max" or "min". name (`str`): Name for the operation. Currently not used Returns: (`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments). """ # Flatten the batch dimensions, as segments ops (scatter) do not support batching. # However if `values` has extra dimensions to the right keep them # unflattened. Segmented ops support vector-valued operations. flat_index = flatten(index) vector_shape = values.size()[len(index.indices.size()) :] # torch.Size object flattened_shape = torch.cat( [torch.as_tensor([-1], dtype=torch.long), torch.as_tensor(vector_shape, dtype=torch.long)], dim=0 ) # changed "view" by "reshape" in the following line flat_values = values.reshape(flattened_shape.tolist()) out = torch.zeros(int(flat_index.num_segments), dtype=torch.float, device=flat_values.device) segment_means = out.scatter_reduce( dim=0, index=flat_index.indices.long(), src=flat_values.float(), reduce=segment_reduce_fn, include_self=False ) # Unflatten the values. 
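    # Illustrative example (the concrete sizes are an assumption): with batch_shape (2,), 64 * 32 = 2048
    # segments and scalar values, `segment_means` has shape (2 * 2048,) and is reshaped back to (2, 2048) below.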
new_shape = torch.cat( [ torch.as_tensor(index.batch_shape(), dtype=torch.long), torch.as_tensor([index.num_segments], dtype=torch.long), torch.as_tensor(vector_shape, dtype=torch.long), ], dim=0, ) output_values = segment_means.clone().view(new_shape.tolist()).to(values.dtype) output_index = range_index_map(index.batch_shape(), index.num_segments) return output_values, output_index def reduce_sum(values, index, name="segmented_reduce_sum"): """ Sums a tensor over its segments. Outputs 0 for empty segments. This operations computes the sum over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be a sum of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]): Tensor containing the values of which the sum must be taken segment-wise. index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].): Index defining the segments. name (`str`, *optional*, defaults to 'segmented_reduce_sum'): Name for the operation. Currently not used Returns: output_values (`torch.Tensor`of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the output values. output_index (`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments]. . """ return _segment_reduce(values, index, "sum", name) def reduce_mean(values, index, name="segmented_reduce_mean"): """ Averages a tensor over its segments. Outputs 0 for empty segments. This operations computes the mean over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be a mean of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]): Tensor containing the values of which the mean must be taken segment-wise. index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].): Index defining the segments. name (`str`, *optional*, defaults to 'segmented_reduce_sum'): Name for the operation. Currently not used Returns: output_values (`torch.Tensor`of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the output values. output_index (`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments]. """ return _segment_reduce(values, index, "mean", name) def reduce_max(values, index, name="segmented_reduce_max"): """ Computes the maximum over segments. This operation computes the maximum over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be an element-wise maximum of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]): Tensor containing the values of which the max must be taken segment-wise. index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].): Index defining the segments. name (`str`, *optional*, defaults to 'segmented_reduce_sum'): Name for the operation. 
Currently not used Returns: output_values (`torch.Tensor`of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the output values. output_index (`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments]. """ return _segment_reduce(values, index, "amax", name) def reduce_min(values, index, name="segmented_reduce_min"): """ Computes the minimum over segments. This operations computes the minimum over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be an element-wise minimum of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values (`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]): Tensor containing the values of which the min must be taken segment-wise. index (`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].): Index defining the segments. name (`str`, *optional*, defaults to 'segmented_reduce_sum'): Name for the operation. Currently not used Returns: output_values (`torch.Tensor`of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the output values. output_index (`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments]. """ return _segment_reduce(values, index, "amin", name) # End of everything related to segmented tensors def compute_column_logits( sequence_output, column_output_weights, column_output_bias, cell_index, cell_mask, allow_empty_column_selection ): """ Computes the column logits. Args: sequence_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model. column_output_weights (`torch.FloatTensor` of shape `(hidden_size)`): Weights of the linear layer for column selection. column_output_bias (`torch.FloatTensor` of shape `()`): Bias of the linear layer for column selection. cell_index (`ProductIndexMap`): Index that groups tokens into cells. cell_mask (`torch.FloatTensor` of shape `(batch_size, max_num_rows * max_num_cols)`): Mask for cells that exist in the table (i.e. that are not padding). allow_empty_column_selection (`bool`): Whether to allow not to select any column Returns: column_logits (`torch.FloatTensor`of shape `(batch_size, max_num_cols)`): Tensor containing the column logits for every example in the batch. """ # First, compute the token logits (batch_size, seq_len) - without temperature token_logits = torch.einsum("bsj,j->bs", sequence_output, column_output_weights) + column_output_bias # Next, average the logits per cell (batch_size, max_num_cols*max_num_rows) cell_logits, cell_logits_index = reduce_mean(token_logits, cell_index) # Finally, average the logits per column (batch_size, max_num_cols) column_index = cell_index.project_inner(cell_logits_index) column_logits, out_index = reduce_sum(cell_logits * cell_mask, column_index) cell_count, _ = reduce_sum(cell_mask, column_index) column_logits /= cell_count + EPSILON_ZERO_DIVISION # Mask columns that do not appear in the example. 
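    # A column is treated as padding when it contains no real cells (cell_count < 0.5); its logit is pushed
    # towards log(0) so it can never be selected. The special column id 0 ("select no column") is only
    # penalized in the same way when allow_empty_column_selection is False.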
is_padding = torch.logical_and(cell_count < 0.5, ~torch.eq(out_index.indices, 0)) column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * torch.as_tensor( is_padding, dtype=torch.float32, device=is_padding.device ) if not allow_empty_column_selection: column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * torch.as_tensor( torch.eq(out_index.indices, 0), dtype=torch.float32, device=out_index.indices.device ) return column_logits def _single_column_cell_selection_loss(token_logits, column_logits, labels, cell_index, col_index, cell_mask): """ Computes the loss for cell selection constrained to a single column. The loss is a hierarchical log-likelihood. The model first predicts a column and then selects cells within that column (conditioned on the column). Cells outside the selected column are never selected. Args: token_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Tensor containing the logits per token. column_logits (`torch.FloatTensor` of shape `(batch_size, max_num_cols)`): Tensor containing the logits per column. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Labels per token. cell_index (`ProductIndexMap`): Index that groups tokens into cells. col_index (`IndexMap`): Index that groups tokens into columns. cell_mask (`torch.FloatTensor` of shape `(batch_size, max_num_rows * max_num_cols)`): Mask for cells that exist in the table (i.e. that are not padding). Returns: selection_loss_per_example (`torch.FloatTensor` of shape `(batch_size,)`): Loss for each example. logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): New logits which are only allowed to select cells in a single column. Logits outside of the most likely column according to *column_logits* will be set to a very low value (such that the probabilities are 0). """ # Part 1: column loss # First find the column we should select. We use the column with maximum number of selected cells. labels_per_column, _ = reduce_sum(torch.as_tensor(labels, dtype=torch.float32, device=labels.device), col_index) # shape of labels_per_column is (batch_size, max_num_cols). It contains the number of label ids for every column, for every example column_label = torch.argmax(labels_per_column, dim=-1) # shape (batch_size,) # Check if there are no selected cells in the column. In that case the model # should predict the special column id 0, which means "select nothing". no_cell_selected = torch.eq( torch.max(labels_per_column, dim=-1)[0], 0 ) # no_cell_selected is of shape (batch_size,) and equals True # if an example of the batch has no cells selected (i.e. if there are no labels set to 1 for that example) column_label = torch.where( no_cell_selected.view(column_label.size()), torch.zeros_like(column_label), column_label ) column_dist = torch.distributions.Categorical(logits=column_logits) # shape (batch_size, max_num_cols) column_loss_per_example = -column_dist.log_prob(column_label) # Part 2: cell loss # Reduce the labels and logits to per-cell from per-token. # logits_per_cell: shape (batch_size, max_num_rows*max_num_cols) i.e. (batch_size, 64*32) logits_per_cell, _ = reduce_mean(token_logits, cell_index) # labels_per_cell: shape (batch_size, 64*32), indicating whether each cell should be selected (1) or not (0) labels_per_cell, labels_index = reduce_max( torch.as_tensor(labels, dtype=torch.long, device=labels.device), cell_index ) # Mask for the selected column. 
# column_id_for_cells: shape (batch_size, 64*32), indicating to which column each cell belongs column_id_for_cells = cell_index.project_inner(labels_index).indices # column_mask: shape (batch_size, 64*32), equal to 1 if cell belongs to column to be selected column_mask = torch.as_tensor( torch.eq(column_id_for_cells, torch.unsqueeze(column_label, dim=-1)), dtype=torch.float32, device=cell_mask.device, ) # Compute the log-likelihood for cells, but only for the selected column. cell_dist = torch.distributions.Bernoulli(logits=logits_per_cell) # shape (batch_size, 64*32) cell_log_prob = cell_dist.log_prob(labels_per_cell.type(torch.float32)) # shape(batch_size, 64*32) cell_loss = -torch.sum(cell_log_prob * column_mask * cell_mask, dim=1) # We need to normalize the loss by the number of cells in the column. cell_loss /= torch.sum(column_mask * cell_mask, dim=1) + EPSILON_ZERO_DIVISION selection_loss_per_example = column_loss_per_example selection_loss_per_example += torch.where( no_cell_selected.view(selection_loss_per_example.size()), torch.zeros_like(selection_loss_per_example), cell_loss, ) # Set the probs outside the selected column (selected by the *model*) # to 0. This ensures backwards compatibility with models that select # cells from multiple columns. selected_column_id = torch.as_tensor( torch.argmax(column_logits, dim=-1), dtype=torch.long, device=column_logits.device ) # shape (batch_size,) # selected_column_mask: shape (batch_size, 64*32), equal to 1 if cell belongs to column selected by the model selected_column_mask = torch.as_tensor( torch.eq(column_id_for_cells, torch.unsqueeze(selected_column_id, dim=-1)), dtype=torch.float32, device=selected_column_id.device, ) # Never select cells with the special column id 0. selected_column_mask = torch.where( torch.eq(column_id_for_cells, 0).view(selected_column_mask.size()), torch.zeros_like(selected_column_mask), selected_column_mask, ) new_logits_per_cell = logits_per_cell + CLOSE_ENOUGH_TO_LOG_ZERO * (1.0 - cell_mask * selected_column_mask) logits = gather(new_logits_per_cell, cell_index) return selection_loss_per_example, logits def compute_token_logits(sequence_output, temperature, output_weights, output_bias): """ Computes logits per token Args: sequence_output (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model. temperature (`float`): Temperature for the Bernoulli distribution. output_weights (`torch.FloatTensor` of shape `(hidden_size,)`): Weights of the linear layer for cell selection. output_bias (`torch.FloatTensor` of shape `()`): Bias of the linear layer for cell selection Returns: logits (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Logits per token. """ logits = (torch.einsum("bsj,j->bs", sequence_output, output_weights) + output_bias) / temperature return logits def _calculate_aggregate_mask(answer, pooled_output, cell_selection_preference, labels, aggregation_classifier): """ Finds examples where the model should select cells with no aggregation. Returns a mask that determines for which examples should the model select answers directly from the table, without any aggregation function. If the answer is a piece of text the case is unambiguous as aggregation functions only apply to numbers. If the answer is a number but does not appear in the table then we must use some aggregation case. The ambiguous case is when the answer is a number that also appears in the table. 
In this case we use the aggregation function probabilities predicted by the model to decide whether to select or aggregate. The threshold for this is a hyperparameter *cell_selection_preference* Args: answer (`torch.FloatTensor` of shape `(batch_size, )`): Answer for every example in the batch. Nan if there is no scalar answer. pooled_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`): Output of the pooler (BertPooler) on top of the encoder layer. cell_selection_preference (`float`): Preference for cell selection in ambiguous cases. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Labels per token. aggregation_classifier (`torch.nn.Linear`): Aggregation head Returns: aggregate_mask (`torch.FloatTensor` of shape `(batch_size,)`): A mask set to 1 for examples that should use aggregation functions. """ # torch.FloatTensor(batch_size,) aggregate_mask_init = torch.logical_not(torch.isnan(answer)).type(torch.FloatTensor).to(answer.device) logits_aggregation = aggregation_classifier(pooled_output) dist_aggregation = torch.distributions.categorical.Categorical(logits=logits_aggregation) # Index 0 corresponds to "no aggregation". aggregation_ops_total_mass = torch.sum(dist_aggregation.probs[:, 1:], dim=1) # Cell selection examples according to current model. is_pred_cell_selection = aggregation_ops_total_mass <= cell_selection_preference # Examples with non-empty cell selection supervision. is_cell_supervision_available = torch.sum(labels, dim=1) > 0 # torch.where is not equivalent to tf.where (in tensorflow 1) # hence the added .view on the condition to match the shape of the first tensor aggregate_mask = torch.where( torch.logical_and(is_pred_cell_selection, is_cell_supervision_available).view(aggregate_mask_init.size()), torch.zeros_like(aggregate_mask_init, dtype=torch.float32), aggregate_mask_init, ) aggregate_mask = aggregate_mask.detach() return aggregate_mask def _calculate_aggregation_loss_known( logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels ): """ Calculates aggregation loss when its type is known during training. In the weakly supervised setting, the only known information is that for cell selection examples, "no aggregation" should be predicted. For other examples (those that require aggregation), no loss is accumulated. In the setting where aggregation type is always known, standard cross entropy loss is accumulated for all examples Args: logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. aggregate_mask (`torch.FloatTensor` of shape `(batch_size, )`): A mask set to 1 for examples that should use aggregation functions. aggregation_labels (`torch.LongTensor` of shape `(batch_size, )`): Aggregation function id for every example in the batch. use_answer_as_supervision (`bool`, *optional*): Whether to use the answer as the only supervision for aggregation examples. num_aggregation_labels (`int`, *optional*, defaults to 0): The number of aggregation operators to predict. Returns: aggregation_loss_known (`torch.FloatTensor` of shape `(batch_size,)`): Aggregation loss (when its type is known during training) per example. """ if use_answer_as_supervision: # Prepare "no aggregation" targets for cell selection examples. target_aggregation = torch.zeros_like(aggregate_mask, dtype=torch.long) else: # Use aggregation supervision as the target. 
target_aggregation = aggregation_labels one_hot_labels = nn.functional.one_hot(target_aggregation, num_classes=num_aggregation_labels).type(torch.float32) log_probs = nn.functional.log_softmax(logits_aggregation, dim=-1) # torch.FloatTensor[batch_size] per_example_aggregation_intermediate = -torch.sum(one_hot_labels * log_probs, dim=-1) if use_answer_as_supervision: # Accumulate loss only for examples requiring cell selection # (no aggregation). return per_example_aggregation_intermediate * (1 - aggregate_mask) else: return per_example_aggregation_intermediate def _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask): """ Calculates aggregation loss in the case of answer supervision. Args: logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. aggregate_mask (`torch.FloatTensor` of shape `(batch_size, )`): A mask set to 1 for examples that should use aggregation functions Returns: aggregation_loss_unknown (`torch.FloatTensor` of shape `(batch_size,)`): Aggregation loss (in case of answer supervision) per example. """ dist_aggregation = torch.distributions.categorical.Categorical(logits=logits_aggregation) # Index 0 corresponds to "no aggregation". aggregation_ops_total_mass = torch.sum(dist_aggregation.probs[:, 1:], dim=1) # Predict some aggregation in case of an answer that needs aggregation. # This increases the probability of all aggregation functions, in a way # similar to MML, but without considering whether the function gives the # correct answer. return -torch.log(aggregation_ops_total_mass) * aggregate_mask def _calculate_aggregation_loss( logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels, aggregation_loss_weight, ): """ Calculates the aggregation loss per example. Args: logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. aggregate_mask (`torch.FloatTensor` of shape `(batch_size, )`): A mask set to 1 for examples that should use aggregation functions. aggregation_labels (`torch.LongTensor` of shape `(batch_size, )`): Aggregation function id for every example in the batch. use_answer_as_supervision (`bool`, *optional*): Whether to use the answer as the only supervision for aggregation examples. num_aggregation_labels (`int`, *optional*, defaults to 0): The number of aggregation operators to predict. aggregation_loss_weight (`float`, *optional*, defaults to 1.0): Importance weight for the aggregation loss. Returns: aggregation_loss (`torch.FloatTensor` of shape `(batch_size,)`): Aggregation loss per example. """ per_example_aggregation_loss = _calculate_aggregation_loss_known( logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels ) if use_answer_as_supervision: # Add aggregation loss for numeric answers that need aggregation. per_example_aggregation_loss += _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask) return aggregation_loss_weight * per_example_aggregation_loss def _calculate_expected_result( dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config ): """ Calculates the expected result given cell and aggregation probabilities. Args: dist_per_cell (`torch.distributions.Bernoulli`): Cell selection distribution for each cell. numeric_values (`torch.FloatTensor` of shape `(batch_size, seq_length)`): Numeric values of every token. 
Nan for tokens which are not numeric values. numeric_values_scale (`torch.FloatTensor` of shape `(batch_size, seq_length)`): Scale of the numeric values of every token. input_mask_float (`torch.FloatTensor` of shape `(batch_size, seq_length)`): Mask for the table, without question tokens and table headers. logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. config ([`TapasConfig`]): Model configuration class with all the hyperparameters of the model Returns: expected_result (`torch.FloatTensor` of shape `(batch_size,)`): The expected result per example. """ if config.use_gumbel_for_cells: gumbel_dist = torch.distributions.RelaxedBernoulli( # The token logits where already divided by the temperature and used for # computing cell selection errors so we need to multiply it again here temperature=config.temperature, logits=dist_per_cell.logits * config.temperature, ) scaled_probability_per_cell = gumbel_dist.sample() else: scaled_probability_per_cell = dist_per_cell.probs # <float32>[batch_size, seq_length] scaled_probability_per_cell = (scaled_probability_per_cell / numeric_values_scale) * input_mask_float count_result = torch.sum(scaled_probability_per_cell, dim=1) numeric_values_masked = torch.where( torch.isnan(numeric_values), torch.zeros_like(numeric_values), numeric_values ) # Mask non-numeric table values to zero. sum_result = torch.sum(scaled_probability_per_cell * numeric_values_masked, dim=1) avg_approximation = config.average_approximation_function if avg_approximation == AverageApproximationFunction.RATIO: average_result = sum_result / (count_result + EPSILON_ZERO_DIVISION) elif avg_approximation == AverageApproximationFunction.FIRST_ORDER: # The sum of all probabilities except that correspond to other cells # Ex here stands for expectation, more explicitly the expectation of the sum of N-1 Bernoulli random variables plus # the constant 1, which is computed as adding all N expected values and subtracting the extra one. It corresponds to X_c # in Appendix D of the original TAPAS paper which is trying to approximate the average of a random set. 
ex = torch.sum(scaled_probability_per_cell, dim=1, keepdim=True) - scaled_probability_per_cell + 1 average_result = torch.sum(numeric_values_masked * scaled_probability_per_cell / ex, dim=1) elif avg_approximation == AverageApproximationFunction.SECOND_ORDER: # The sum of all probabilities except that correspond to other cells ex = torch.sum(scaled_probability_per_cell, dim=1, keepdim=True) - scaled_probability_per_cell + 1 pointwise_var = scaled_probability_per_cell * (1 - scaled_probability_per_cell) var = torch.sum(pointwise_var, dim=1, keepdim=True) - pointwise_var multiplier = (var / torch.square(ex) + 1) / ex average_result = torch.sum(numeric_values_masked * scaled_probability_per_cell * multiplier, dim=1) else: raise ValueError(f"Invalid average_approximation_function: {config.average_approximation_function}") if config.use_gumbel_for_aggregation: gumbel_dist = torch.distributions.RelaxedOneHotCategorical( config.aggregation_temperature, logits=logits_aggregation[:, 1:] ) # <float32>[batch_size, num_aggregation_labels - 1] aggregation_op_only_probs = gumbel_dist.sample() else: # <float32>[batch_size, num_aggregation_labels - 1] aggregation_op_only_probs = nn.functional.softmax( logits_aggregation[:, 1:] / config.aggregation_temperature, dim=-1 ) all_results = torch.cat( [ torch.unsqueeze(sum_result, dim=1), torch.unsqueeze(average_result, dim=1), torch.unsqueeze(count_result, dim=1), ], dim=1, ) expected_result = torch.sum(all_results * aggregation_op_only_probs, dim=1) return expected_result # PyTorch does not currently support Huber loss with custom delta so we define it ourself def huber_loss(input, target, delta: float = 1.0): errors = torch.abs(input - target) # shape (batch_size,) return torch.where(errors < delta, 0.5 * errors**2, errors * delta - (0.5 * delta**2)) def _calculate_regression_loss( answer, aggregate_mask, dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config, ): """ Calculates the regression loss per example. Args: answer (`torch.FloatTensor` of shape `(batch_size,)`): Answer for every example in the batch. Nan if there is no scalar answer. aggregate_mask (`torch.FloatTensor` of shape `(batch_size,)`): A mask set to 1 for examples that should use aggregation functions. dist_per_cell (`torch.distributions.Bernoulli`): Cell selection distribution for each cell. numeric_values (`torch.FloatTensor` of shape `(batch_size, seq_length)`): Numeric values of every token. Nan for tokens which are not numeric values. numeric_values_scale (`torch.FloatTensor` of shape `(batch_size, seq_length)`): Scale of the numeric values of every token. input_mask_float (`torch.FloatTensor` of shape `(batch_size, seq_length)`): Mask for the table, without question tokens and table headers. logits_aggregation (`torch.FloatTensor` of shape `(batch_size, num_aggregation_labels)`): Logits per aggregation operation. config ([`TapasConfig`]): Model configuration class with all the parameters of the model Returns: per_example_answer_loss_scaled (`torch.FloatTensor` of shape `(batch_size,)`): Scales answer loss for each example in the batch. large_answer_loss_mask (`torch.FloatTensor` of shape `(batch_size,)`): A mask which is 1 for examples for which their answer loss is larger than the answer_loss_cutoff. 
""" # float32 (batch_size,) expected_result = _calculate_expected_result( dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config ) # float32 (batch_size,) answer_masked = torch.where(torch.isnan(answer), torch.zeros_like(answer), answer) if config.use_normalized_answer_loss: normalizer = (torch.max(torch.abs(expected_result), torch.abs(answer_masked)) + EPSILON_ZERO_DIVISION).detach() normalized_answer_masked = answer_masked / normalizer normalized_expected_result = expected_result / normalizer per_example_answer_loss = huber_loss( normalized_expected_result * aggregate_mask, normalized_answer_masked * aggregate_mask ) else: per_example_answer_loss = huber_loss( expected_result * aggregate_mask, answer_masked * aggregate_mask, delta=config.huber_loss_delta ) if config.answer_loss_cutoff is None: large_answer_loss_mask = torch.ones_like(per_example_answer_loss, dtype=torch.float32) else: large_answer_loss_mask = torch.where( per_example_answer_loss > config.answer_loss_cutoff, torch.zeros_like(per_example_answer_loss, dtype=torch.float32), torch.ones_like(per_example_answer_loss, dtype=torch.float32), ) per_example_answer_loss_scaled = config.answer_loss_importance * (per_example_answer_loss * aggregate_mask) return per_example_answer_loss_scaled, large_answer_loss_mask
1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing a torch tensor using `//` is deprecated and has to be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/generation/test_beam_search.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import floats_tensor, ids_tensor if is_torch_available(): import torch from transformers.generation import ( BeamHypotheses, BeamSearchScorer, ConstrainedBeamSearchScorer, DisjunctiveConstraint, PhrasalConstraint, ) class BeamSearchTester: def __init__( self, parent, batch_size=3, sequence_length=10, vocab_size=99, pad_token_id=0, max_length=20, num_beams=4, length_penalty=2.0, do_early_stopping=True, num_beam_hyps_to_keep=2, ): self.parent = parent self.batch_size = batch_size self.sequence_length = sequence_length self.vocab_size = vocab_size self.pad_token_id = pad_token_id self.max_length = max_length self.num_beams = num_beams self.length_penalty = length_penalty self.do_early_stopping = do_early_stopping self.num_beam_hyps_to_keep = num_beam_hyps_to_keep # cannot be randomly generated self.eos_token_id = vocab_size + 1 def prepare_beam_scorer(self, **kwargs): return BeamSearchScorer( batch_size=kwargs.get("batch_size", self.batch_size), num_beams=kwargs.get("num_beams", self.num_beams), device=torch_device, length_penalty=kwargs.get("length_penalty", self.length_penalty), do_early_stopping=kwargs.get("do_early_stopping", self.do_early_stopping), num_beam_hyps_to_keep=kwargs.get("num_beam_hyps_to_keep", self.num_beam_hyps_to_keep), ) def prepare_inputs(self): input_ids = ids_tensor((self.batch_size * self.num_beams, self.sequence_length), self.vocab_size) next_tokens = ids_tensor((self.batch_size, 2 * self.num_beams), self.vocab_size).to(torch_device) next_indices = ids_tensor((self.batch_size, 2 * self.num_beams), self.num_beams).to(torch_device) next_scores, _ = (-floats_tensor((self.batch_size, 2 * self.num_beams)).to(torch_device)).sort(descending=True) return (input_ids, next_tokens, next_indices, next_scores) def check_beam_hypotheses(self, input_ids, *args): # check that correct number of beam hypotheses is set in beam scorer beam_scorer = self.prepare_beam_scorer(do_early_stopping=True) beam_hyp = beam_scorer._beam_hyps[0] self.parent.assertEqual(len(beam_scorer._beam_hyps), self.batch_size) # check correct type self.parent.assertTrue(isinstance(beam_hyp, BeamHypotheses)) # check that num_beams is correctly set self.parent.assertEqual(beam_hyp.num_beams, self.num_beams) # check for early stopping deactivated for beam_idx in range(self.num_beams): beam_hyp.add(input_ids[beam_idx], -10.0) # if early stopping True -> score does not matter self.parent.assertTrue(beam_hyp.is_done(-10.0, 5)) # re-init beam_scorer = self.prepare_beam_scorer(do_early_stopping=False) beam_hyp = beam_scorer._beam_hyps[0] # add `num_beams + 1` beams to change `worst_score` for beam_idx in range(self.num_beams + 1): beam_hyp.add(input_ids[beam_idx], -10.0 + float(beam_idx)) # -10.0 is removed => -9.0 is worst score 
self.parent.assertAlmostEqual(beam_hyp.worst_score, -9.0 / (self.sequence_length**beam_hyp.length_penalty)) # -5.0 is better than worst score => should not be finished self.parent.assertFalse(beam_hyp.is_done(-5.0, self.sequence_length)) # -20.0 is worse than worst score => should be finished self.parent.assertTrue(beam_hyp.is_done(-20.0, self.sequence_length)) def check_beam_scorer_update(self, input_ids, next_tokens, next_indices, next_scores): # check too many eos tokens beam_scorer = self.prepare_beam_scorer() tokens = next_tokens.clone() tokens[0, :] = self.eos_token_id with self.parent.assertRaises(ValueError): beam_scorer.process(input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id) # check all batches are done beam_scorer = self.prepare_beam_scorer() tokens = next_tokens.clone() tokens[:, : self.num_beams] = self.eos_token_id beam_indices = torch.zeros_like(input_ids) + torch.arange(input_ids.shape[-1], device=input_ids.device) beam_indices = tuple(tuple(b) for b in beam_indices) beam_scorer.process( input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id, beam_indices=beam_indices ) # beam scorer should be done self.parent.assertTrue(beam_scorer.is_done) # check beam_scorer = self.prepare_beam_scorer() tokens = next_tokens.clone() tokens[:, 1] = self.eos_token_id beam_outputs = beam_scorer.process( input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id, beam_indices=beam_indices ) output_scores = beam_outputs["next_beam_scores"] output_tokens = beam_outputs["next_beam_tokens"] output_indices = beam_outputs["next_beam_indices"] def cut_expected_tensor(tensor): return torch.cat([tensor[:, :1], tensor[:, 2 : self.num_beams + 1]], dim=1).flatten() # check all outptus # cut out id of eos token and take best `num_beams` outputs expected_output_tokens = cut_expected_tensor(tokens) expected_output_scores = cut_expected_tensor(next_scores) # add num_beams * batch_idx expected_output_indices = ( cut_expected_tensor(next_indices) + (torch.arange(self.num_beams * self.batch_size, device=torch_device) // self.num_beams) * self.num_beams ) self.parent.assertListEqual(expected_output_tokens.tolist(), output_tokens.tolist()) self.parent.assertListEqual(expected_output_indices.tolist(), output_indices.tolist()) self.parent.assertTrue(torch.allclose(expected_output_scores, output_scores, atol=1e-3)) # make sure ids of eos token are correctly saved in beam_hyps of beam scorer expected_beam_indices = list(range(10)) for batch_idx in range(self.batch_size): correct_idx = batch_idx * self.num_beams + next_indices[batch_idx, 1] self.parent.assertListEqual( input_ids[correct_idx].tolist(), beam_scorer._beam_hyps[batch_idx].beams[0][1].tolist() ) self.parent.assertListEqual( expected_beam_indices + [correct_idx], torch.tensor(beam_scorer._beam_hyps[batch_idx].beams[0][2]).tolist(), ) def check_beam_scores_finalize(self, input_ids, next_tokens, next_indices, next_scores): # max_length should be only one more than current input_ids to check that eos is correctly appended max_length = self.sequence_length + 1 beam_scorer = self.prepare_beam_scorer(num_beam_hyps_to_keep=1, length_penalty=1.0, do_early_stopping=False) # update beams and append to input_ids tokens = next_tokens.clone() # first batch, first output has to finish with eos token id since scores are correctly sorted tokens[0, 0] = self.eos_token_id # make sure corresponding score is as good as possible to surely be picked first next_scores[0, 0] = 0.0 beam_outputs = 
beam_scorer.process( input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id ) output_scores = beam_outputs["next_beam_scores"] output_tokens = beam_outputs["next_beam_tokens"] output_indices = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[output_indices, :], output_tokens.unsqueeze(-1)], dim=-1) # finalize beam_indices = torch.zeros_like(input_ids) + torch.arange(input_ids.shape[-1], device=input_ids.device) beam_indices = tuple(tuple(b) for b in beam_indices) sequence_output = beam_scorer.finalize( input_ids, output_scores, output_tokens, output_indices, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, max_length=max_length, beam_indices=beam_indices, ) sequences = sequence_output["sequences"] sequence_scores = sequence_output["sequence_scores"] # since `num_beam_hyps_to_keep` = 1 => only return `batch_size` x `max_length` self.parent.assertListEqual(list(sequences.shape), [self.batch_size, max_length]) self.parent.assertListEqual(list(sequence_scores.shape), [self.batch_size]) # check sequence_scores self.parent.assertFalse((sequence_scores > 0).any().item()) # first batch has to finish with eos_token self.parent.assertEqual(sequences[0, -1].item(), self.eos_token_id) # other batches cannot finish with eos token self.parent.assertNotEqual(sequences[1, -1].item(), self.eos_token_id) self.parent.assertNotEqual(sequences[2, -1].item(), self.eos_token_id) # now test that if `num_beam_hyps_to_keep` is 3 => all beams are returned beam_scorer.num_beam_hyps_to_keep = self.num_beams sequence_output = beam_scorer.finalize( input_ids, output_scores, output_tokens, output_indices, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, max_length=max_length, beam_indices=beam_indices, ) sequences = sequence_output["sequences"] sequence_scores = sequence_output["sequence_scores"] self.parent.assertListEqual(list(sequences.shape), [self.num_beams * self.batch_size, max_length]) self.parent.assertListEqual(list(sequence_scores.shape), [self.num_beams * self.batch_size]) class ConstrainedBeamSearchTester: def __init__( self, parent, constraints=None, batch_size=3, sequence_length=10, vocab_size=99, pad_token_id=0, max_length=20, num_beams=4, length_penalty=2.0, do_early_stopping=True, num_beam_hyps_to_keep=2, ): self.parent = parent self.batch_size = batch_size self.sequence_length = sequence_length self.vocab_size = vocab_size self.pad_token_id = pad_token_id self.max_length = max_length self.num_beams = num_beams self.length_penalty = length_penalty self.do_early_stopping = do_early_stopping self.num_beam_hyps_to_keep = num_beam_hyps_to_keep if constraints is None: force_tokens = torch.randint(10, 50, (1, 2))[0].tolist() disjunctive_tokens = torch.randint(10, 50, (2, 2)).tolist() constraints = [PhrasalConstraint(force_tokens), DisjunctiveConstraint(disjunctive_tokens)] self.constraints = constraints # cannot be randomly generated self.eos_token_id = vocab_size + 1 def prepare_constrained_beam_scorer(self, **kwargs): return ConstrainedBeamSearchScorer( constraints=kwargs.get("constraints", self.constraints), batch_size=kwargs.get("batch_size", self.batch_size), num_beams=kwargs.get("num_beams", self.num_beams), device=torch_device, length_penalty=kwargs.get("length_penalty", self.length_penalty), do_early_stopping=kwargs.get("do_early_stopping", self.do_early_stopping), num_beam_hyps_to_keep=kwargs.get("num_beam_hyps_to_keep", self.num_beam_hyps_to_keep), ) def prepare_inputs(self): input_ids = ids_tensor((self.batch_size * 
self.num_beams, self.sequence_length), self.vocab_size) next_tokens = ids_tensor((self.batch_size, 2 * self.num_beams), self.vocab_size).to(torch_device) next_indices = ids_tensor((self.batch_size, 2 * self.num_beams), self.num_beams).to(torch_device) next_scores, _ = (-floats_tensor((self.batch_size, 2 * self.num_beams)).to(torch_device)).sort(descending=True) scores_for_all_vocab, _ = ( -floats_tensor((self.batch_size * self.num_beams, self.vocab_size)).to(torch_device) ).sort(descending=True) return (input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab) def check_beam_hypotheses(self, input_ids, *args): # check that correct number of beam hypotheses is set in beam scorer constrained_beam_scorer = self.prepare_constrained_beam_scorer(do_early_stopping=True) beam_hyp = constrained_beam_scorer._beam_hyps[0] self.parent.assertEqual(len(constrained_beam_scorer._beam_hyps), self.batch_size) # check correct type self.parent.assertTrue(isinstance(beam_hyp, BeamHypotheses)) # check that num_beams is correctly set self.parent.assertEqual(beam_hyp.num_beams, self.num_beams) # check for early stopping deactivated for beam_idx in range(self.num_beams): beam_hyp.add(input_ids[beam_idx], -10.0) # if early stopping True -> score does not matter self.parent.assertTrue(beam_hyp.is_done(-10.0, 5)) # re-init constrained_beam_scorer = self.prepare_constrained_beam_scorer(do_early_stopping=False) beam_hyp = constrained_beam_scorer._beam_hyps[0] # add `num_beams + 1` beams to change `worst_score` for beam_idx in range(self.num_beams + 1): beam_hyp.add(input_ids[beam_idx], -10.0 + float(beam_idx)) # -10.0 is removed => -9.0 is worst score self.parent.assertAlmostEqual(beam_hyp.worst_score, -9.0 / (self.sequence_length**beam_hyp.length_penalty)) # -5.0 is better than worst score => should not be finished self.parent.assertFalse(beam_hyp.is_done(-5.0, self.sequence_length)) # -20.0 is worse than worst score => should be finished self.parent.assertTrue(beam_hyp.is_done(-20.0, self.sequence_length)) def check_constrained_beam_scorer_update( self, input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab ): # check too many eos tokens constrained_beam_scorer = self.prepare_constrained_beam_scorer() stacked_token_ids = [] for constraint in self.constraints: token_ids = constraint.token_ids token_ids = token_ids[0] if isinstance(token_ids[0], list) else token_ids stacked_token_ids = stacked_token_ids + token_ids fulfilling_sequence = torch.LongTensor(stacked_token_ids) fulfill_len = fulfilling_sequence.size(0) input_ids[:, :fulfill_len] = fulfilling_sequence tokens = next_tokens.clone() tokens[0, :] = self.eos_token_id with self.parent.assertRaises(ValueError): constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) # check all batches are done constrained_beam_scorer = self.prepare_constrained_beam_scorer() tokens = next_tokens.clone() tokens[:, : self.num_beams] = self.eos_token_id constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) # beam scorer should be done self.parent.assertTrue(constrained_beam_scorer.is_done) # check constrained_beam_scorer = self.prepare_constrained_beam_scorer() tokens = next_tokens.clone() tokens[:, 1] = self.eos_token_id beam_outputs = constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) output_scores = 
beam_outputs["next_beam_scores"] output_tokens = beam_outputs["next_beam_tokens"] output_indices = beam_outputs["next_beam_indices"] def cut_expected_tensor(tensor): return torch.cat([tensor[:, :1], tensor[:, 2 : self.num_beams + 1]], dim=1).flatten() # check all outptus # cut out id of eos token and take best `num_beams` outputs expected_output_tokens = cut_expected_tensor(tokens) expected_output_scores = cut_expected_tensor(next_scores) # add num_beams * batch_idx expected_output_indices = ( cut_expected_tensor(next_indices) + (torch.arange(self.num_beams * self.batch_size, device=torch_device) // self.num_beams) * self.num_beams ) self.parent.assertListEqual(expected_output_tokens.tolist(), output_tokens.tolist()) self.parent.assertListEqual(expected_output_indices.tolist(), output_indices.tolist()) self.parent.assertTrue(torch.allclose(expected_output_scores, output_scores, atol=1e-3)) # make sure ids of eos token are correctly saved in beam_hyps of beam scorer for batch_idx in range(self.batch_size): correct_idx = batch_idx * self.num_beams + next_indices[batch_idx, 1] self.parent.assertListEqual( input_ids[correct_idx].tolist(), constrained_beam_scorer._beam_hyps[batch_idx].beams[0][1].tolist() ) def check_constrained_beam_scorer_finalize( self, input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab ): # max_length should be only one more than current input_ids to check that eos is correctly appended max_length = self.sequence_length + 1 # for testing finalize, we do want to have fulfilled constraints stacked_token_ids = [] for constraint in self.constraints: token_ids = constraint.token_ids token_ids = token_ids[0] if isinstance(token_ids[0], list) else token_ids stacked_token_ids = stacked_token_ids + token_ids fulfilling_sequence = torch.LongTensor(stacked_token_ids) fulfill_len = fulfilling_sequence.size(0) input_ids[:, :fulfill_len] = fulfilling_sequence constrained_beam_scorer = self.prepare_constrained_beam_scorer( num_beam_hyps_to_keep=1, length_penalty=1.0, do_early_stopping=False ) constraints = constrained_beam_scorer.constraints # update beams and append to input_ids tokens = next_tokens.clone() # first batch, first output has to finish with eos token id since scores are correctly sorted tokens[0, 0] = self.eos_token_id # make sure corresponding score is as good as possible to surely be picked first next_scores[0, 0] = 0.0 beam_outputs = constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) output_scores = beam_outputs["next_beam_scores"] output_tokens = beam_outputs["next_beam_tokens"] output_indices = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[output_indices, :], output_tokens.unsqueeze(-1)], dim=-1) # finalize sequence_output = constrained_beam_scorer.finalize( input_ids, output_scores, output_tokens, output_indices, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, max_length=max_length, ) sequences = sequence_output["sequences"] sequence_scores = sequence_output["sequence_scores"] # since `num_beam_hyps_to_keep` = 1 => only return `batch_size` x `max_length` self.parent.assertListEqual(list(sequences.shape), [self.batch_size, max_length]) self.parent.assertListEqual(list(sequence_scores.shape), [self.batch_size]) # check sequence_scores self.parent.assertFalse((sequence_scores > 0).any().item()) # first batch has to finish with eos_token self.parent.assertEqual(sequences[0, -1].item(), self.eos_token_id) # other batches cannot finish 
with eos token self.parent.assertNotEqual(sequences[1, -1].item(), self.eos_token_id) self.parent.assertNotEqual(sequences[2, -1].item(), self.eos_token_id) # test that the constraint is indeed fulfilled for output, constraint in [(s, c) for s in sequences for c in constraints]: forced_token_ids = constraint.token_ids if isinstance(forced_token_ids[0], list): # disjunctive case flag = False for token_ids in forced_token_ids: if self._check_sequence_inside_sequence(output, token_ids): flag = True break self.parent.assertEqual(flag, True) else: self.parent.assertEqual(self._check_sequence_inside_sequence(output, forced_token_ids), True) # now test that if `num_beam_hyps_to_keep` is 3 => all beams are returned # constrained_beam_scorer.num_beam_hyps_to_keep = self.num_beams constrained_beam_scorer = self.prepare_constrained_beam_scorer( num_beam_hyps_to_keep=self.num_beams, length_penalty=1.0, do_early_stopping=False ) sequence_output = constrained_beam_scorer.finalize( input_ids, output_scores, output_tokens, output_indices, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, max_length=max_length, ) sequences = sequence_output["sequences"] sequence_scores = sequence_output["sequence_scores"] self.parent.assertListEqual(list(sequences.shape), [self.num_beams * self.batch_size, max_length]) self.parent.assertListEqual(list(sequence_scores.shape), [self.num_beams * self.batch_size]) def _check_sequence_inside_sequence(self, tensor_1, tensor_2): # check if tensor_1 inside tensor_2 or tensor_2 inside tensor_1. # set to same device. we don't care what device. if not isinstance(tensor_1, list): tensor_1 = tensor_1.cpu().tolist() if not isinstance(tensor_2, list): tensor_2 = tensor_2.cpu().tolist() in_order = len(tensor_1) <= len(tensor_2) longer = tensor_2 if in_order else tensor_1 shorter = tensor_1 if in_order else tensor_2 flag = False chunk_size = len(shorter) for chunk_idx in range(len(longer) - chunk_size + 1): subseq = longer[chunk_idx : chunk_idx + chunk_size] if subseq == shorter: flag = True break return flag @require_torch class BeamSearchTest(unittest.TestCase): def setUp(self): self.beam_search_tester = BeamSearchTester(self) def test_beam_hypotheses(self): inputs = self.beam_search_tester.prepare_inputs() self.beam_search_tester.check_beam_hypotheses(*inputs) def test_beam_scorer_update(self): inputs = self.beam_search_tester.prepare_inputs() self.beam_search_tester.check_beam_scorer_update(*inputs) def test_beam_scorer_finalize(self): inputs = self.beam_search_tester.prepare_inputs() self.beam_search_tester.check_beam_scores_finalize(*inputs) @require_torch class ConstrainedBeamSearchTest(unittest.TestCase): def setUp(self): self.constrained_beam_search_tester = ConstrainedBeamSearchTester(self) def test_constrained_beam_hypotheses(self): inputs = self.constrained_beam_search_tester.prepare_inputs() self.constrained_beam_search_tester.check_beam_hypotheses(*inputs) def test_constrained_beam_scorer_update(self): inputs = self.constrained_beam_search_tester.prepare_inputs() self.constrained_beam_search_tester.check_constrained_beam_scorer_update(*inputs) def test_constrained_beam_scorer_finalize(self): inputs = self.constrained_beam_search_tester.prepare_inputs() self.constrained_beam_search_tester.check_constrained_beam_scorer_finalize(*inputs)
# coding=utf-8 # Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, torch_device from ..test_modeling_common import floats_tensor, ids_tensor if is_torch_available(): import torch from transformers.generation import ( BeamHypotheses, BeamSearchScorer, ConstrainedBeamSearchScorer, DisjunctiveConstraint, PhrasalConstraint, ) from transformers.pytorch_utils import torch_int_div class BeamSearchTester: def __init__( self, parent, batch_size=3, sequence_length=10, vocab_size=99, pad_token_id=0, max_length=20, num_beams=4, length_penalty=2.0, do_early_stopping=True, num_beam_hyps_to_keep=2, ): self.parent = parent self.batch_size = batch_size self.sequence_length = sequence_length self.vocab_size = vocab_size self.pad_token_id = pad_token_id self.max_length = max_length self.num_beams = num_beams self.length_penalty = length_penalty self.do_early_stopping = do_early_stopping self.num_beam_hyps_to_keep = num_beam_hyps_to_keep # cannot be randomly generated self.eos_token_id = vocab_size + 1 def prepare_beam_scorer(self, **kwargs): return BeamSearchScorer( batch_size=kwargs.get("batch_size", self.batch_size), num_beams=kwargs.get("num_beams", self.num_beams), device=torch_device, length_penalty=kwargs.get("length_penalty", self.length_penalty), do_early_stopping=kwargs.get("do_early_stopping", self.do_early_stopping), num_beam_hyps_to_keep=kwargs.get("num_beam_hyps_to_keep", self.num_beam_hyps_to_keep), ) def prepare_inputs(self): input_ids = ids_tensor((self.batch_size * self.num_beams, self.sequence_length), self.vocab_size) next_tokens = ids_tensor((self.batch_size, 2 * self.num_beams), self.vocab_size).to(torch_device) next_indices = ids_tensor((self.batch_size, 2 * self.num_beams), self.num_beams).to(torch_device) next_scores, _ = (-floats_tensor((self.batch_size, 2 * self.num_beams)).to(torch_device)).sort(descending=True) return (input_ids, next_tokens, next_indices, next_scores) def check_beam_hypotheses(self, input_ids, *args): # check that correct number of beam hypotheses is set in beam scorer beam_scorer = self.prepare_beam_scorer(do_early_stopping=True) beam_hyp = beam_scorer._beam_hyps[0] self.parent.assertEqual(len(beam_scorer._beam_hyps), self.batch_size) # check correct type self.parent.assertTrue(isinstance(beam_hyp, BeamHypotheses)) # check that num_beams is correctly set self.parent.assertEqual(beam_hyp.num_beams, self.num_beams) # check for early stopping deactivated for beam_idx in range(self.num_beams): beam_hyp.add(input_ids[beam_idx], -10.0) # if early stopping True -> score does not matter self.parent.assertTrue(beam_hyp.is_done(-10.0, 5)) # re-init beam_scorer = self.prepare_beam_scorer(do_early_stopping=False) beam_hyp = beam_scorer._beam_hyps[0] # add `num_beams + 1` beams to change `worst_score` for beam_idx in range(self.num_beams + 1): beam_hyp.add(input_ids[beam_idx], -10.0 + float(beam_idx)) # -10.0 is removed => -9.0 is worst 
score self.parent.assertAlmostEqual(beam_hyp.worst_score, -9.0 / (self.sequence_length**beam_hyp.length_penalty)) # -5.0 is better than worst score => should not be finished self.parent.assertFalse(beam_hyp.is_done(-5.0, self.sequence_length)) # -20.0 is worse than worst score => should be finished self.parent.assertTrue(beam_hyp.is_done(-20.0, self.sequence_length)) def check_beam_scorer_update(self, input_ids, next_tokens, next_indices, next_scores): # check too many eos tokens beam_scorer = self.prepare_beam_scorer() tokens = next_tokens.clone() tokens[0, :] = self.eos_token_id with self.parent.assertRaises(ValueError): beam_scorer.process(input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id) # check all batches are done beam_scorer = self.prepare_beam_scorer() tokens = next_tokens.clone() tokens[:, : self.num_beams] = self.eos_token_id beam_indices = torch.zeros_like(input_ids) + torch.arange(input_ids.shape[-1], device=input_ids.device) beam_indices = tuple(tuple(b) for b in beam_indices) beam_scorer.process( input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id, beam_indices=beam_indices ) # beam scorer should be done self.parent.assertTrue(beam_scorer.is_done) # check beam_scorer = self.prepare_beam_scorer() tokens = next_tokens.clone() tokens[:, 1] = self.eos_token_id beam_outputs = beam_scorer.process( input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id, beam_indices=beam_indices ) output_scores = beam_outputs["next_beam_scores"] output_tokens = beam_outputs["next_beam_tokens"] output_indices = beam_outputs["next_beam_indices"] def cut_expected_tensor(tensor): return torch.cat([tensor[:, :1], tensor[:, 2 : self.num_beams + 1]], dim=1).flatten() # check all outptus # cut out id of eos token and take best `num_beams` outputs expected_output_tokens = cut_expected_tensor(tokens) expected_output_scores = cut_expected_tensor(next_scores) # add num_beams * batch_idx offset = torch_int_div(torch.arange(self.num_beams * self.batch_size, device=torch_device), self.num_beams) expected_output_indices = cut_expected_tensor(next_indices) + offset * self.num_beams self.parent.assertListEqual(expected_output_tokens.tolist(), output_tokens.tolist()) self.parent.assertListEqual(expected_output_indices.tolist(), output_indices.tolist()) self.parent.assertTrue(torch.allclose(expected_output_scores, output_scores, atol=1e-3)) # make sure ids of eos token are correctly saved in beam_hyps of beam scorer expected_beam_indices = list(range(10)) for batch_idx in range(self.batch_size): correct_idx = batch_idx * self.num_beams + next_indices[batch_idx, 1] self.parent.assertListEqual( input_ids[correct_idx].tolist(), beam_scorer._beam_hyps[batch_idx].beams[0][1].tolist() ) self.parent.assertListEqual( expected_beam_indices + [correct_idx], torch.tensor(beam_scorer._beam_hyps[batch_idx].beams[0][2]).tolist(), ) def check_beam_scores_finalize(self, input_ids, next_tokens, next_indices, next_scores): # max_length should be only one more than current input_ids to check that eos is correctly appended max_length = self.sequence_length + 1 beam_scorer = self.prepare_beam_scorer(num_beam_hyps_to_keep=1, length_penalty=1.0, do_early_stopping=False) # update beams and append to input_ids tokens = next_tokens.clone() # first batch, first output has to finish with eos token id since scores are correctly sorted tokens[0, 0] = self.eos_token_id # make sure corresponding score is as good as possible to surely be picked first next_scores[0, 0] = 0.0 
beam_outputs = beam_scorer.process( input_ids, next_scores, tokens, next_indices, eos_token_id=self.eos_token_id ) output_scores = beam_outputs["next_beam_scores"] output_tokens = beam_outputs["next_beam_tokens"] output_indices = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[output_indices, :], output_tokens.unsqueeze(-1)], dim=-1) # finalize beam_indices = torch.zeros_like(input_ids) + torch.arange(input_ids.shape[-1], device=input_ids.device) beam_indices = tuple(tuple(b) for b in beam_indices) sequence_output = beam_scorer.finalize( input_ids, output_scores, output_tokens, output_indices, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, max_length=max_length, beam_indices=beam_indices, ) sequences = sequence_output["sequences"] sequence_scores = sequence_output["sequence_scores"] # since `num_beam_hyps_to_keep` = 1 => only return `batch_size` x `max_length` self.parent.assertListEqual(list(sequences.shape), [self.batch_size, max_length]) self.parent.assertListEqual(list(sequence_scores.shape), [self.batch_size]) # check sequence_scores self.parent.assertFalse((sequence_scores > 0).any().item()) # first batch has to finish with eos_token self.parent.assertEqual(sequences[0, -1].item(), self.eos_token_id) # other batches cannot finish with eos token self.parent.assertNotEqual(sequences[1, -1].item(), self.eos_token_id) self.parent.assertNotEqual(sequences[2, -1].item(), self.eos_token_id) # now test that if `num_beam_hyps_to_keep` is 3 => all beams are returned beam_scorer.num_beam_hyps_to_keep = self.num_beams sequence_output = beam_scorer.finalize( input_ids, output_scores, output_tokens, output_indices, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, max_length=max_length, beam_indices=beam_indices, ) sequences = sequence_output["sequences"] sequence_scores = sequence_output["sequence_scores"] self.parent.assertListEqual(list(sequences.shape), [self.num_beams * self.batch_size, max_length]) self.parent.assertListEqual(list(sequence_scores.shape), [self.num_beams * self.batch_size]) class ConstrainedBeamSearchTester: def __init__( self, parent, constraints=None, batch_size=3, sequence_length=10, vocab_size=99, pad_token_id=0, max_length=20, num_beams=4, length_penalty=2.0, do_early_stopping=True, num_beam_hyps_to_keep=2, ): self.parent = parent self.batch_size = batch_size self.sequence_length = sequence_length self.vocab_size = vocab_size self.pad_token_id = pad_token_id self.max_length = max_length self.num_beams = num_beams self.length_penalty = length_penalty self.do_early_stopping = do_early_stopping self.num_beam_hyps_to_keep = num_beam_hyps_to_keep if constraints is None: force_tokens = torch.randint(10, 50, (1, 2))[0].tolist() disjunctive_tokens = torch.randint(10, 50, (2, 2)).tolist() constraints = [PhrasalConstraint(force_tokens), DisjunctiveConstraint(disjunctive_tokens)] self.constraints = constraints # cannot be randomly generated self.eos_token_id = vocab_size + 1 def prepare_constrained_beam_scorer(self, **kwargs): return ConstrainedBeamSearchScorer( constraints=kwargs.get("constraints", self.constraints), batch_size=kwargs.get("batch_size", self.batch_size), num_beams=kwargs.get("num_beams", self.num_beams), device=torch_device, length_penalty=kwargs.get("length_penalty", self.length_penalty), do_early_stopping=kwargs.get("do_early_stopping", self.do_early_stopping), num_beam_hyps_to_keep=kwargs.get("num_beam_hyps_to_keep", self.num_beam_hyps_to_keep), ) def prepare_inputs(self): input_ids = ids_tensor((self.batch_size 
* self.num_beams, self.sequence_length), self.vocab_size) next_tokens = ids_tensor((self.batch_size, 2 * self.num_beams), self.vocab_size).to(torch_device) next_indices = ids_tensor((self.batch_size, 2 * self.num_beams), self.num_beams).to(torch_device) next_scores, _ = (-floats_tensor((self.batch_size, 2 * self.num_beams)).to(torch_device)).sort(descending=True) scores_for_all_vocab, _ = ( -floats_tensor((self.batch_size * self.num_beams, self.vocab_size)).to(torch_device) ).sort(descending=True) return (input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab) def check_beam_hypotheses(self, input_ids, *args): # check that correct number of beam hypotheses is set in beam scorer constrained_beam_scorer = self.prepare_constrained_beam_scorer(do_early_stopping=True) beam_hyp = constrained_beam_scorer._beam_hyps[0] self.parent.assertEqual(len(constrained_beam_scorer._beam_hyps), self.batch_size) # check correct type self.parent.assertTrue(isinstance(beam_hyp, BeamHypotheses)) # check that num_beams is correctly set self.parent.assertEqual(beam_hyp.num_beams, self.num_beams) # check for early stopping deactivated for beam_idx in range(self.num_beams): beam_hyp.add(input_ids[beam_idx], -10.0) # if early stopping True -> score does not matter self.parent.assertTrue(beam_hyp.is_done(-10.0, 5)) # re-init constrained_beam_scorer = self.prepare_constrained_beam_scorer(do_early_stopping=False) beam_hyp = constrained_beam_scorer._beam_hyps[0] # add `num_beams + 1` beams to change `worst_score` for beam_idx in range(self.num_beams + 1): beam_hyp.add(input_ids[beam_idx], -10.0 + float(beam_idx)) # -10.0 is removed => -9.0 is worst score self.parent.assertAlmostEqual(beam_hyp.worst_score, -9.0 / (self.sequence_length**beam_hyp.length_penalty)) # -5.0 is better than worst score => should not be finished self.parent.assertFalse(beam_hyp.is_done(-5.0, self.sequence_length)) # -20.0 is worse than worst score => should be finished self.parent.assertTrue(beam_hyp.is_done(-20.0, self.sequence_length)) def check_constrained_beam_scorer_update( self, input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab ): # check too many eos tokens constrained_beam_scorer = self.prepare_constrained_beam_scorer() stacked_token_ids = [] for constraint in self.constraints: token_ids = constraint.token_ids token_ids = token_ids[0] if isinstance(token_ids[0], list) else token_ids stacked_token_ids = stacked_token_ids + token_ids fulfilling_sequence = torch.LongTensor(stacked_token_ids) fulfill_len = fulfilling_sequence.size(0) input_ids[:, :fulfill_len] = fulfilling_sequence tokens = next_tokens.clone() tokens[0, :] = self.eos_token_id with self.parent.assertRaises(ValueError): constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) # check all batches are done constrained_beam_scorer = self.prepare_constrained_beam_scorer() tokens = next_tokens.clone() tokens[:, : self.num_beams] = self.eos_token_id constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) # beam scorer should be done self.parent.assertTrue(constrained_beam_scorer.is_done) # check constrained_beam_scorer = self.prepare_constrained_beam_scorer() tokens = next_tokens.clone() tokens[:, 1] = self.eos_token_id beam_outputs = constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) output_scores = 
beam_outputs["next_beam_scores"] output_tokens = beam_outputs["next_beam_tokens"] output_indices = beam_outputs["next_beam_indices"] def cut_expected_tensor(tensor): return torch.cat([tensor[:, :1], tensor[:, 2 : self.num_beams + 1]], dim=1).flatten() # check all outptus # cut out id of eos token and take best `num_beams` outputs expected_output_tokens = cut_expected_tensor(tokens) expected_output_scores = cut_expected_tensor(next_scores) # add num_beams * batch_idx offset = torch_int_div(torch.arange(self.num_beams * self.batch_size, device=torch_device), self.num_beams) expected_output_indices = cut_expected_tensor(next_indices) + offset * self.num_beams self.parent.assertListEqual(expected_output_tokens.tolist(), output_tokens.tolist()) self.parent.assertListEqual(expected_output_indices.tolist(), output_indices.tolist()) self.parent.assertTrue(torch.allclose(expected_output_scores, output_scores, atol=1e-3)) # make sure ids of eos token are correctly saved in beam_hyps of beam scorer for batch_idx in range(self.batch_size): correct_idx = batch_idx * self.num_beams + next_indices[batch_idx, 1] self.parent.assertListEqual( input_ids[correct_idx].tolist(), constrained_beam_scorer._beam_hyps[batch_idx].beams[0][1].tolist() ) def check_constrained_beam_scorer_finalize( self, input_ids, next_tokens, next_indices, next_scores, scores_for_all_vocab ): # max_length should be only one more than current input_ids to check that eos is correctly appended max_length = self.sequence_length + 1 # for testing finalize, we do want to have fulfilled constraints stacked_token_ids = [] for constraint in self.constraints: token_ids = constraint.token_ids token_ids = token_ids[0] if isinstance(token_ids[0], list) else token_ids stacked_token_ids = stacked_token_ids + token_ids fulfilling_sequence = torch.LongTensor(stacked_token_ids) fulfill_len = fulfilling_sequence.size(0) input_ids[:, :fulfill_len] = fulfilling_sequence constrained_beam_scorer = self.prepare_constrained_beam_scorer( num_beam_hyps_to_keep=1, length_penalty=1.0, do_early_stopping=False ) constraints = constrained_beam_scorer.constraints # update beams and append to input_ids tokens = next_tokens.clone() # first batch, first output has to finish with eos token id since scores are correctly sorted tokens[0, 0] = self.eos_token_id # make sure corresponding score is as good as possible to surely be picked first next_scores[0, 0] = 0.0 beam_outputs = constrained_beam_scorer.process( input_ids, next_scores, tokens, next_indices, scores_for_all_vocab, eos_token_id=self.eos_token_id ) output_scores = beam_outputs["next_beam_scores"] output_tokens = beam_outputs["next_beam_tokens"] output_indices = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[output_indices, :], output_tokens.unsqueeze(-1)], dim=-1) # finalize sequence_output = constrained_beam_scorer.finalize( input_ids, output_scores, output_tokens, output_indices, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, max_length=max_length, ) sequences = sequence_output["sequences"] sequence_scores = sequence_output["sequence_scores"] # since `num_beam_hyps_to_keep` = 1 => only return `batch_size` x `max_length` self.parent.assertListEqual(list(sequences.shape), [self.batch_size, max_length]) self.parent.assertListEqual(list(sequence_scores.shape), [self.batch_size]) # check sequence_scores self.parent.assertFalse((sequence_scores > 0).any().item()) # first batch has to finish with eos_token self.parent.assertEqual(sequences[0, -1].item(), self.eos_token_id) # other 
batches cannot finish with eos token self.parent.assertNotEqual(sequences[1, -1].item(), self.eos_token_id) self.parent.assertNotEqual(sequences[2, -1].item(), self.eos_token_id) # test that the constraint is indeed fulfilled for output, constraint in [(s, c) for s in sequences for c in constraints]: forced_token_ids = constraint.token_ids if isinstance(forced_token_ids[0], list): # disjunctive case flag = False for token_ids in forced_token_ids: if self._check_sequence_inside_sequence(output, token_ids): flag = True break self.parent.assertEqual(flag, True) else: self.parent.assertEqual(self._check_sequence_inside_sequence(output, forced_token_ids), True) # now test that if `num_beam_hyps_to_keep` is 3 => all beams are returned # constrained_beam_scorer.num_beam_hyps_to_keep = self.num_beams constrained_beam_scorer = self.prepare_constrained_beam_scorer( num_beam_hyps_to_keep=self.num_beams, length_penalty=1.0, do_early_stopping=False ) sequence_output = constrained_beam_scorer.finalize( input_ids, output_scores, output_tokens, output_indices, pad_token_id=self.pad_token_id, eos_token_id=self.eos_token_id, max_length=max_length, ) sequences = sequence_output["sequences"] sequence_scores = sequence_output["sequence_scores"] self.parent.assertListEqual(list(sequences.shape), [self.num_beams * self.batch_size, max_length]) self.parent.assertListEqual(list(sequence_scores.shape), [self.num_beams * self.batch_size]) def _check_sequence_inside_sequence(self, tensor_1, tensor_2): # check if tensor_1 inside tensor_2 or tensor_2 inside tensor_1. # set to same device. we don't care what device. if not isinstance(tensor_1, list): tensor_1 = tensor_1.cpu().tolist() if not isinstance(tensor_2, list): tensor_2 = tensor_2.cpu().tolist() in_order = len(tensor_1) <= len(tensor_2) longer = tensor_2 if in_order else tensor_1 shorter = tensor_1 if in_order else tensor_2 flag = False chunk_size = len(shorter) for chunk_idx in range(len(longer) - chunk_size + 1): subseq = longer[chunk_idx : chunk_idx + chunk_size] if subseq == shorter: flag = True break return flag @require_torch class BeamSearchTest(unittest.TestCase): def setUp(self): self.beam_search_tester = BeamSearchTester(self) def test_beam_hypotheses(self): inputs = self.beam_search_tester.prepare_inputs() self.beam_search_tester.check_beam_hypotheses(*inputs) def test_beam_scorer_update(self): inputs = self.beam_search_tester.prepare_inputs() self.beam_search_tester.check_beam_scorer_update(*inputs) def test_beam_scorer_finalize(self): inputs = self.beam_search_tester.prepare_inputs() self.beam_search_tester.check_beam_scores_finalize(*inputs) @require_torch class ConstrainedBeamSearchTest(unittest.TestCase): def setUp(self): self.constrained_beam_search_tester = ConstrainedBeamSearchTester(self) def test_constrained_beam_hypotheses(self): inputs = self.constrained_beam_search_tester.prepare_inputs() self.constrained_beam_search_tester.check_beam_hypotheses(*inputs) def test_constrained_beam_scorer_update(self): inputs = self.constrained_beam_search_tester.prepare_inputs() self.constrained_beam_search_tester.check_constrained_beam_scorer_update(*inputs) def test_constrained_beam_scorer_finalize(self): inputs = self.constrained_beam_search_tester.prepare_inputs() self.constrained_beam_search_tester.check_constrained_beam_scorer_finalize(*inputs)
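The substantive diff between the two versions of this test file is the per-batch offset computation in `check_beam_scorer_update` and `check_constrained_beam_scorer_update`: `// self.num_beams` is swapped for `torch_int_div`. A hedged sketch checking that both forms produce the same indices (batch and beam sizes are illustrative; the import mirrors the one the updated test uses):

```python
import torch

from transformers.pytorch_utils import torch_int_div  # same helper the updated test imports

batch_size, num_beams = 3, 4
ar = torch.arange(num_beams * batch_size)

# old test body: floor division via `//` (source of the deprecation warning)
old_offset = (ar // num_beams) * num_beams

# new test body: torch_int_div performs the same integer floor division without the warning
new_offset = torch_int_div(ar, num_beams) * num_beams

assert torch.equal(old_offset, new_offset)
```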
1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
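For context, a minimal sketch of the replacement pattern the description refers to; the tensor values and variable names below are illustrative only and are not taken from the patch. The beam-search test above calls a `torch_int_div` helper, which appears to wrap this same `torch.div(..., rounding_mode="floor")` call.

```python
import torch

# Illustrative values only; any integer tensor and divisor would do.
positions = torch.arange(12)
num_beams = 4

# Old pattern (emits a __floordiv__ deprecation warning on recent PyTorch):
#     offsets = positions // num_beams
# Replacement pattern:
offsets = torch.div(positions, num_beams, rounding_mode="floor")

print(offsets)  # tensor([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
```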
./tests/models/wav2vec2/test_modeling_wav2vec2.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Wav2Vec2 model. """ import math import multiprocessing import os import pickle import tempfile import traceback import unittest import numpy as np from datasets import load_dataset from transformers import Wav2Vec2Config, is_torch_available from transformers.testing_utils import ( CaptureLogger, is_pt_flax_cross_test, is_pyctcdecode_available, is_torchaudio_available, require_pyctcdecode, require_soundfile, require_torch, require_torchaudio, run_test_in_subprocess, slow, torch_device, ) from transformers.utils import is_torch_fx_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( Wav2Vec2FeatureExtractor, Wav2Vec2ForAudioFrameClassification, Wav2Vec2ForCTC, Wav2Vec2ForMaskedLM, Wav2Vec2ForPreTraining, Wav2Vec2ForSequenceClassification, Wav2Vec2ForXVector, Wav2Vec2Model, Wav2Vec2Processor, ) from transformers.models.wav2vec2.modeling_wav2vec2 import ( Wav2Vec2GumbelVectorQuantizer, _compute_mask_indices, _sample_negative_indices, ) from transformers.pytorch_utils import is_torch_less_than_1_9 else: is_torch_less_than_1_9 = True if is_torchaudio_available(): import torchaudio if is_pyctcdecode_available(): import pyctcdecode.decoder from transformers import Wav2Vec2ProcessorWithLM from transformers.models.wav2vec2_with_lm import processing_wav2vec2_with_lm if is_torch_fx_available(): from transformers.utils.fx import symbolic_trace def _test_wav2vec2_with_lm_invalid_pool(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) ds = load_dataset("common_voice", "es", split="test", streaming=True) sample = next(iter(ds)) resampled_audio = torchaudio.functional.resample( torch.tensor(sample["audio"]["array"]), 48_000, 16_000 ).numpy() model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm").to( torch_device ) processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") input_values = processor(resampled_audio, return_tensors="pt").input_values with torch.no_grad(): logits = model(input_values.to(torch_device)).logits # use a spawn pool, which should trigger a warning if different than fork with CaptureLogger(pyctcdecode.decoder.logger) as cl, multiprocessing.get_context("spawn").Pool(1) as pool: transcription = processor.batch_decode(logits.cpu().numpy(), pool).text unittest.TestCase().assertIn("Falling back to sequential decoding.", cl.out) unittest.TestCase().assertEqual(transcription[0], "bien y qué regalo vas a abrir primero") # force batch_decode to internally create a spawn pool, which should trigger a warning if different than fork multiprocessing.set_start_method("spawn", 
force=True) with CaptureLogger(processing_wav2vec2_with_lm.logger) as cl: transcription = processor.batch_decode(logits.cpu().numpy()).text unittest.TestCase().assertIn("Falling back to sequential decoding.", cl.out) unittest.TestCase().assertEqual(transcription[0], "bien y qué regalo vas a abrir primero") except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() class Wav2Vec2ModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, # speech is longer is_training=False, hidden_size=16, feat_extract_norm="group", feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, num_hidden_layers=4, num_attention_heads=2, hidden_dropout_prob=0.1, # this is most likely not correctly set yet intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, mask_time_prob=0.5, mask_time_length=2, vocab_size=32, do_stable_layer_norm=False, num_adapter_layers=1, adapter_stride=2, tdnn_dim=(32, 32), tdnn_kernel=(5, 3), tdnn_dilation=(1, 2), xvector_output_dim=32, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_dropout = feat_extract_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.num_adapter_layers = num_adapter_layers self.adapter_stride = adapter_stride self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.scope = scope self.tdnn_dim = tdnn_dim self.tdnn_kernel = tdnn_kernel self.tdnn_dilation = tdnn_dilation self.xvector_output_dim = xvector_output_dim output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = int(math.ceil(output_seq_length)) self.encoder_seq_length = self.output_seq_length self.adapter_output_seq_length = (self.output_seq_length - 1) // adapter_stride + 1 def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() return config, input_values, attention_mask def get_config(self): return Wav2Vec2Config( hidden_size=self.hidden_size, feat_extract_norm=self.feat_extract_norm, feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, mask_time_prob=self.mask_time_prob, mask_time_length=self.mask_time_length, num_conv_pos_embeddings=self.num_conv_pos_embeddings, 
num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout_prob=self.hidden_dropout_prob, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, do_stable_layer_norm=self.do_stable_layer_norm, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, num_adapter_layers=self.num_adapter_layers, adapter_stride=self.adapter_stride, tdnn_dim=self.tdnn_dim, tdnn_kernel=self.tdnn_kernel, tdnn_dilation=self.tdnn_dilation, xvector_output_dim=self.xvector_output_dim, ) def create_and_check_model(self, config, input_values, attention_mask): model = Wav2Vec2Model(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_model_with_adapter(self, config, input_values, attention_mask): config.add_adapter = True model = Wav2Vec2Model(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, self.hidden_size) ) def create_and_check_model_with_adapter_for_ctc(self, config, input_values, attention_mask): config.add_adapter = True config.output_hidden_size = 2 * config.hidden_size model = Wav2Vec2ForCTC(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.adapter_output_seq_length, self.vocab_size) ) def create_and_check_model_with_adapter_proj_dim(self, config, input_values, attention_mask): config.add_adapter = True config.output_hidden_size = 8 model = Wav2Vec2Model(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, config.output_hidden_size), ) def create_and_check_batch_inference(self, config, input_values, *args): # test does not pass for models making use of `group_norm` # check: https://github.com/pytorch/fairseq/issues/3227 model = Wav2Vec2Model(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0.0 batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state for i in range(input_values.shape[0]): input_slice = input_values[i : i + 1, : input_lengths[i]] output = model(input_slice).last_hidden_state batch_output = batch_outputs[i : i + 1, : output.shape[1]] self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3)) def check_ctc_loss(self, config, input_values, *args): model = Wav2Vec2ForCTC(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels 
= ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 model.config.ctc_loss_reduction = "sum" sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() model.config.ctc_loss_reduction = "mean" mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() self.parent.assertTrue(isinstance(sum_loss, float)) self.parent.assertTrue(isinstance(mean_loss, float)) def check_seq_classifier_loss(self, config, input_values, *args): model = Wav2Vec2ForSequenceClassification(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() unmasked_loss = model(input_values, labels=labels).loss.item() self.parent.assertTrue(isinstance(masked_loss, float)) self.parent.assertTrue(isinstance(unmasked_loss, float)) self.parent.assertTrue(masked_loss != unmasked_loss) def check_ctc_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = Wav2Vec2ForCTC(config=config) model.to(torch_device) model.train() # freeze feature encoder model.freeze_feature_encoder() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 if max_length_labels[i] < labels.shape[-1]: # it's important that we make sure that target lenghts are at least # one shorter than logit lenghts to prevent -inf labels[i, max_length_labels[i] - 1 :] = -100 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_seq_classifier_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = Wav2Vec2ForSequenceClassification(config=config) model.to(torch_device) model.train() # freeze everything but the classification head model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_xvector_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = Wav2Vec2ForXVector(config=config) model.to(torch_device) model.train() # freeze everything but the classification head model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, 
input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_labels_out_of_vocab(self, config, input_values, *args): model = Wav2Vec2ForCTC(config) model.to(torch_device) model.train() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100) with self.parent.assertRaises(ValueError): model(input_values, labels=labels) def prepare_config_and_inputs_for_common(self): config, input_values, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_torch class Wav2Vec2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (Wav2Vec2ForCTC, Wav2Vec2Model, Wav2Vec2ForMaskedLM, Wav2Vec2ForSequenceClassification, Wav2Vec2ForPreTraining) if is_torch_available() else () ) pipeline_model_mapping = ( { "audio-classification": Wav2Vec2ForSequenceClassification, "automatic-speech-recognition": Wav2Vec2ForCTC, "feature-extraction": Wav2Vec2Model, "fill-mask": Wav2Vec2ForMaskedLM, } if is_torch_available() else {} ) fx_compatible = True test_pruning = False test_headmasking = False def setUp(self): self.model_tester = Wav2Vec2ModelTester(self) self.config_tester = ConfigTester(self, config_class=Wav2Vec2Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_adapter(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter(*config_and_inputs) def test_model_with_adapter_for_ctc(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter_for_ctc(*config_and_inputs) def test_model_with_adapter_proj_dim(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter_proj_dim(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_xvector_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_xvector_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) # Wav2Vec2 has no inputs_embeds def test_inputs_embeds(self): pass # `input_ids` is renamed to `input_values` def test_forward_signature(self): pass # Wav2Vec2 cannot resize token embeddings # since it has no tokens 
embeddings def test_resize_tokens_embeddings(self): pass # Wav2Vec2 has no inputs_embeds # and thus the `get_input_embeddings` fn # is not implemented def test_model_common_attributes(self): pass @is_pt_flax_cross_test # non-robust architecture does not exist in Flax def test_equivalence_flax_to_pt(self): pass @is_pt_flax_cross_test # non-robust architecture does not exist in Flax def test_equivalence_pt_to_flax(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) # set layer drop to 0 model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "feature_projection.projection.weight", "feature_projection.projection.bias", "objective.weight", ] if param.requires_grad: if any([x in name for x in uniform_init_parms]): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "codevectors") and module.codevectors is not None: module.codevectors.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) def test_mask_feature_prob_ctc(self): model = Wav2Vec2ForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", mask_feature_prob=0.2, mask_feature_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( 
"hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_prob_ctc(self): model = Wav2Vec2ForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", mask_time_prob=0.2, mask_time_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h") self.assertIsNotNone(model) # Wav2Vec2 cannot be torchscripted because of group norm. def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): if not is_torch_fx_available() or not self.fx_compatible: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss) try: input_names = [ "attention_mask", "bbox", "input_features", "input_ids", "input_values", "pixel_values", "token_type_ids", "visual_feats", "visual_pos", ] labels = inputs.get("labels", None) start_positions = inputs.get("start_positions", None) end_positions = inputs.get("end_positions", None) if labels is not None: input_names.append("labels") if start_positions is not None: input_names.append("start_positions") if end_positions is not None: input_names.append("end_positions") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) model_output = model(**filtered_inputs) if ( isinstance(model, Wav2Vec2ForSequenceClassification) and not hasattr(model.config, "problem_type") or model.config.problem_type is None ): model.config.problem_type = "single_label_classification" traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) except Exception as e: self.fail(f"Couldn't trace module: {e}") def flatten_output(output): flatten = [] for x in output: if isinstance(x, (tuple, list)): flatten += flatten_output(x) elif not isinstance(x, torch.Tensor): continue else: flatten.append(x) return flatten model_output = flatten_output(model_output) traced_output = flatten_output(traced_output) num_outputs = len(model_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], traced_output[i]), f"traced {i}th output doesn't match model {i}th output for {model_class}", ) # Test that the 
model can be serialized and restored properly with tempfile.TemporaryDirectory() as tmp_dir_name: pkl_file_name = os.path.join(tmp_dir_name, "model.pkl") try: with open(pkl_file_name, "wb") as f: pickle.dump(traced_model, f) with open(pkl_file_name, "rb") as f: loaded = pickle.load(f) except Exception as e: self.fail(f"Couldn't serialize / deserialize the traced model: {e}") loaded_output = loaded(**filtered_inputs) loaded_output = flatten_output(loaded_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], loaded_output[i]), f"serialized model {i}th output doesn't match model {i}th output for {model_class}", ) # Avoid memory leak. Without this, each call increase RAM usage by ~20MB. # (Even with this call, there are still memory leak by ~0.04MB) self.clear_torch_jit_class_registry() @require_torch class Wav2Vec2RobustModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = ( ( Wav2Vec2ForCTC, Wav2Vec2Model, Wav2Vec2ForMaskedLM, Wav2Vec2ForSequenceClassification, Wav2Vec2ForPreTraining, Wav2Vec2ForAudioFrameClassification, Wav2Vec2ForXVector, ) if is_torch_available() else () ) test_pruning = False test_headmasking = False def setUp(self): self.model_tester = Wav2Vec2ModelTester( self, conv_stride=(3, 3, 3), feat_extract_norm="layer", do_stable_layer_norm=True ) self.config_tester = ConfigTester(self, config_class=Wav2Vec2Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_adapter(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter(*config_and_inputs) def test_model_with_adapter_proj_dim(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter_proj_dim(*config_and_inputs) def test_batched_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_batch_inference(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_xvector_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_xvector_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) # Wav2Vec2 has no inputs_embeds def test_inputs_embeds(self): pass # `input_ids` is renamed to `input_values` def test_forward_signature(self): pass # Wav2Vec2 cannot resize token embeddings # since it has no tokens embeddings def test_resize_tokens_embeddings(self): pass # Wav2Vec2 has no inputs_embeds # and thus the `get_input_embeddings` fn # is not implemented def test_model_common_attributes(self): pass def 
test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) # set layer drop to 0 model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "feature_projection.projection.weight", "feature_projection.projection.bias", "objective.weight", ] if param.requires_grad: if any([x in name for x in uniform_init_parms]): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "codevectors") and module.codevectors is not None: module.codevectors.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) def test_model_for_pretraining(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = Wav2Vec2ForPreTraining(config).to(torch_device) batch_size = inputs_dict["input_values"].shape[0] feature_seq_length = int(model._get_feat_extract_output_lengths(inputs_dict["input_values"].shape[1])) features_shape = (batch_size, feature_seq_length) mask_time_indices = _compute_mask_indices( features_shape, model.config.mask_time_prob, model.config.mask_time_length, min_masks=2, ) sampled_negative_indices = _sample_negative_indices(features_shape, 10, mask_time_indices) mask_time_indices = torch.from_numpy(mask_time_indices).to(torch_device) sampled_negative_indices = 
torch.from_numpy(sampled_negative_indices).to(torch_device) loss = model( inputs_dict["input_values"], attention_mask=inputs_dict["attention_mask"], mask_time_indices=mask_time_indices, sampled_negative_indices=sampled_negative_indices, ).loss # more losses mask_time_indices[:, : mask_time_indices.shape[-1] // 2] = True sampled_negative_indices = _sample_negative_indices(features_shape, 10, mask_time_indices.cpu().numpy()) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) loss_more_masked = model( inputs_dict["input_values"], attention_mask=inputs_dict["attention_mask"], mask_time_indices=mask_time_indices, sampled_negative_indices=sampled_negative_indices, ).loss # loss_more_masked has to be bigger or equal loss since more masked inputs have to be predicted self.assertTrue(loss.detach().item() <= loss_more_masked.detach().item()) def test_mask_feature_prob_ctc(self): model = Wav2Vec2ForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", mask_feature_prob=0.2, mask_feature_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_prob_ctc(self): model = Wav2Vec2ForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", mask_time_prob=0.2, mask_time_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_feature_prob_ctc_single_batch(self): model = Wav2Vec2ForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", mask_time_prob=0.2, mask_feature_prob=0.2, mask_time_length=2, mask_feature_length=2, ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) batch_duration_in_seconds = [6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (1, 1498, 32)) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h") self.assertIsNotNone(model) @require_torch class Wav2Vec2UtilsTest(unittest.TestCase): def test_compute_mask_indices(self): batch_size = 4 
sequence_length = 60 mask_prob = 0.5 mask_length = 1 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)]) def test_compute_mask_indices_low_prob(self): # with these settings num_masked_spans=0.5, which means probabilistic rounding # ensures that in 5 out of 10 method calls, num_masked_spans=0, and in # the other 5 out of 10, cases num_masked_spans=1 n_trials = 100 batch_size = 4 sequence_length = 100 mask_prob = 0.05 mask_length = 10 count_dimensions_masked = 0 count_dimensions_not_masked = 0 for _ in range(n_trials): mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) num_masks = torch.sum(mask).item() if num_masks > 0: count_dimensions_masked += 1 else: count_dimensions_not_masked += 1 # as we test for at least 10 masked dimension and at least # 10 non-masked dimension, this test could fail with probability: # P(100 coin flips, at most 9 heads) = 1.66e-18 self.assertGreater(count_dimensions_masked, int(n_trials * 0.1)) self.assertGreater(count_dimensions_not_masked, int(n_trials * 0.1)) def test_compute_mask_indices_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) # because of overlap mask don't have to add up exactly to `mask_prob * sequence_length`, but have to be smaller or equal for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) def test_compute_mask_indices_attn_mask_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device) attention_mask[:2, sequence_length // 2 :] = 0 mask = _compute_mask_indices( (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask ) mask = torch.from_numpy(mask).to(torch_device) for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) self.assertTrue(mask[:2, sequence_length // 2 :].sum() == 0) def test_compute_mask_indices_short_audio(self): batch_size = 4 sequence_length = 100 mask_prob = 0.05 mask_length = 10 attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device) # force one example to be heavily padded attention_mask[0, 5:] = 0 mask = _compute_mask_indices( (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask, min_masks=2 ) # make sure that non-padded examples cannot be padded self.assertFalse(mask[0][attention_mask[0].to(torch.bool).cpu()].any()) def test_compute_perplexity(self): probs = torch.arange(100, device=torch_device).reshape(2, 5, 10) / 100 ppl = Wav2Vec2GumbelVectorQuantizer._compute_perplexity(probs) self.assertTrue(abs(ppl.item() - 141.4291) < 1e-3) # mask half of the input mask = torch.ones((2,), device=torch_device, dtype=torch.bool) mask[0] = 0 ppl = Wav2Vec2GumbelVectorQuantizer._compute_perplexity(probs, mask) self.assertTrue(abs(ppl.item() - 58.6757) < 1e-3) def test_sample_negatives(self): batch_size = 2 sequence_length = 10 hidden_size = 4 num_negatives = 3 features = (torch.arange(sequence_length * hidden_size, device=torch_device) // hidden_size).view( sequence_length, hidden_size ) # each value in 
vector consits of same value features = features[None, :].expand(batch_size, sequence_length, hidden_size).contiguous() # sample negative indices sampled_negative_indices = _sample_negative_indices((batch_size, sequence_length), num_negatives, None) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) negatives = features.view(-1, hidden_size)[sampled_negative_indices.long().view(-1)] negatives = negatives.view(batch_size, sequence_length, -1, hidden_size).permute(2, 0, 1, 3) self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size)) # make sure no negatively sampled vector is actually a positive one for negative in negatives: self.assertTrue(((negative - features) == 0).sum() == 0.0) # make sure that full vectors are sampled and not values of vectors => this means that `unique()` yields a single value for `hidden_size` dim self.assertEqual(negatives.unique(dim=-1).shape, (num_negatives, batch_size, sequence_length, 1)) def test_sample_negatives_with_mask(self): batch_size = 2 sequence_length = 10 hidden_size = 4 num_negatives = 3 # second half of last input tensor is padded mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device) mask[-1, sequence_length // 2 :] = 0 features = (torch.arange(sequence_length * hidden_size, device=torch_device) // hidden_size).view( sequence_length, hidden_size ) # each value in vector consits of same value features = features[None, :].expand(batch_size, sequence_length, hidden_size).contiguous() # replace masked feature vectors with -100 to test that those are not sampled features = torch.where(mask[:, :, None].expand(features.shape).bool(), features, -100) # sample negative indices sampled_negative_indices = _sample_negative_indices( (batch_size, sequence_length), num_negatives, mask.cpu().numpy() ) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) negatives = features.view(-1, hidden_size)[sampled_negative_indices.long().view(-1)] negatives = negatives.view(batch_size, sequence_length, -1, hidden_size).permute(2, 0, 1, 3) self.assertTrue((negatives >= 0).all().item()) self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size)) # make sure no negatively sampled vector is actually a positive one for negative in negatives: self.assertTrue(((negative - features) == 0).sum() == 0.0) # make sure that full vectors are sampled and not values of vectors => this means that `unique()` yields a single value for `hidden_size` dim self.assertEqual(negatives.unique(dim=-1).shape, (num_negatives, batch_size, sequence_length, 1)) @require_torch @require_soundfile @slow class Wav2Vec2ModelIntegrationTest(unittest.TestCase): def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").filter( lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)] )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def _load_superb(self, task, num_samples): ds = load_dataset("anton-l/superb_dummy", task, split="test") return ds[:num_samples] def test_inference_ctc_normal(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") model.to(torch_device) processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h", do_lower_case=True) input_speech = self._load_datasamples(1) input_values = processor(input_speech, 
return_tensors="pt").input_values.to(torch_device) with torch.no_grad(): logits = model(input_values).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = ["a man said to the universe sir i exist"] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_ctc_normal_batched(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") model.to(torch_device) processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h", do_lower_case=True) input_speech = self._load_datasamples(2) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) with torch.no_grad(): logits = model(input_values).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight lowing cloth that was the only garment he wore", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_ctc_robust_batched(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self").to(torch_device) processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h-lv60-self", do_lower_case=True) input_speech = self._load_datasamples(4) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): logits = model(input_values, attention_mask=attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore", "the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around" " him with the thousands of spectators were trivialities not worth thinking about", "his instant panic was followed by a small sharp blow high on his chest", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) @unittest.skipIf(torch_device != "cpu", "cannot make deterministic on GPU") def test_inference_integration(self): model = Wav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-base") model.to(torch_device) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base") input_speech = self._load_datasamples(2) inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True) batch_size = inputs_dict["input_values"].shape[0] feature_seq_length = int(model._get_feat_extract_output_lengths(inputs_dict["input_values"].shape[1])) features_shape = (batch_size, feature_seq_length) np.random.seed(4) mask_time_indices = _compute_mask_indices( features_shape, model.config.mask_time_prob, model.config.mask_time_length, min_masks=2, ) mask_time_indices = torch.from_numpy(mask_time_indices).to(torch_device) with torch.no_grad(): outputs = model( inputs_dict.input_values.to(torch_device), mask_time_indices=mask_time_indices, ) # compute cosine similarity cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, dim=-1) # retrieve cosine sim of masked features cosine_sim_masked = cosine_sim[mask_time_indices] # cosine similarity of model is all > 0.5 as model is # pre-trained 
on contrastive loss # fmt: off expected_cosine_sim_masked = torch.tensor([ 0.8523, 0.5860, 0.6905, 0.5557, 0.7456, 0.5249, 0.6639, 0.7654, 0.7565, 0.8167, 0.8222, 0.7960, 0.8034, 0.8166, 0.8310, 0.8263, 0.8274, 0.8258, 0.8179, 0.8412, 0.8536, 0.5098, 0.4728, 0.6461, 0.4498, 0.6002, 0.5774, 0.6457, 0.7123, 0.5668, 0.6866, 0.4960, 0.6293, 0.7423, 0.7419, 0.7526, 0.7768, 0.4898, 0.5393, 0.8183 ], device=torch_device) # fmt: on self.assertTrue(torch.allclose(cosine_sim_masked, expected_cosine_sim_masked, atol=1e-3)) def test_inference_pretrained(self): model = Wav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-base") model.to(torch_device) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "facebook/wav2vec2-base", return_attention_mask=True ) input_speech = self._load_datasamples(2) inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True) batch_size = inputs_dict["input_values"].shape[0] feature_seq_length = int(model._get_feat_extract_output_lengths(inputs_dict["input_values"].shape[1])) features_shape = (batch_size, feature_seq_length) torch.manual_seed(0) mask_time_indices = _compute_mask_indices( features_shape, model.config.mask_time_prob, model.config.mask_time_length, min_masks=2, ) mask_time_indices = torch.from_numpy(mask_time_indices).to(torch_device) with torch.no_grad(): outputs = model( inputs_dict.input_values.to(torch_device), attention_mask=inputs_dict.attention_mask.to(torch_device), mask_time_indices=mask_time_indices, ) # compute cosine similarity cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, dim=-1) # retrieve cosine sim of masked features cosine_sim_masked = cosine_sim[mask_time_indices] # ... now compare to randomly initialized model config = Wav2Vec2Config.from_pretrained("facebook/wav2vec2-base") model_rand = Wav2Vec2ForPreTraining(config).to(torch_device).eval() with torch.no_grad(): outputs_rand = model_rand( inputs_dict.input_values.to(torch_device), attention_mask=inputs_dict.attention_mask.to(torch_device), mask_time_indices=mask_time_indices, ) # compute cosine similarity cosine_sim_rand = torch.cosine_similarity( outputs_rand.projected_states, outputs_rand.projected_quantized_states, dim=-1 ) # retrieve cosine sim of masked features cosine_sim_masked_rand = cosine_sim_rand[mask_time_indices] # a pretrained wav2vec2 model has learned to predict the quantized latent states # => the cosine similarity between quantized states and predicted states > 0.5 # a random wav2vec2 model has not learned to predict the quantized latent states # => the cosine similarity between quantized states and predicted states is very likely < 0.1 self.assertTrue(cosine_sim_masked.mean().item() - 5 * cosine_sim_masked_rand.mean().item() > 0) @unittest.skipIf(torch_device != "cpu", "cannot make deterministic on GPU") def test_loss_pretraining(self): model = Wav2Vec2ForPreTraining.from_pretrained( "facebook/wav2vec2-base", attention_dropout=0.0, feat_proj_dropout=0.0, hidden_dropout=0.0, layerdrop=0.0, ) model.to(torch_device).train() feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "facebook/wav2vec2-base", return_attention_mask=True ) input_speech = self._load_datasamples(2) inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True) batch_size = inputs_dict["input_values"].shape[0] feature_seq_length = int(model._get_feat_extract_output_lengths(inputs_dict["input_values"].shape[1])) features_shape = (batch_size, feature_seq_length) torch.manual_seed(0) 
np.random.seed(0) mask_time_indices = _compute_mask_indices( features_shape, model.config.mask_time_prob, model.config.mask_time_length, min_masks=2, ) sampled_negative_indices = _sample_negative_indices( mask_time_indices.shape, model.config.num_negatives, mask_time_indices ) mask_time_indices = torch.from_numpy(mask_time_indices).to(torch_device) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) with torch.no_grad(): outputs = model( inputs_dict.input_values.to(torch_device), attention_mask=inputs_dict.attention_mask.to(torch_device), mask_time_indices=mask_time_indices, sampled_negative_indices=sampled_negative_indices, ) # check diversity loss num_codevectors = model.config.num_codevectors_per_group * model.config.num_codevector_groups diversity_loss = (num_codevectors - outputs.codevector_perplexity) / num_codevectors self.assertTrue(abs(diversity_loss.item() - 0.9538) < 1e-3) # check overall loss (contrastive loss + diversity loss) expected_loss = 116.7094 self.assertTrue(abs(outputs.loss.item() - expected_loss) < 1e-3) def test_inference_keyword_spotting(self): model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-ks").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-ks") input_data = self._load_superb("ks", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) predicted_logits, predicted_ids = torch.max(outputs.logits, dim=-1) expected_labels = [7, 6, 10, 9] # s3prl logits for the same batch expected_logits = torch.tensor([6.1186, 11.8961, 10.2931, 6.0898], device=torch_device) self.assertListEqual(predicted_ids.tolist(), expected_labels) self.assertTrue(torch.allclose(predicted_logits, expected_logits, atol=1e-2)) def test_inference_intent_classification(self): model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-ic").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-ic") input_data = self._load_superb("ic", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) predicted_logits_action, predicted_ids_action = torch.max(outputs.logits[:, :6], dim=-1) predicted_logits_object, predicted_ids_object = torch.max(outputs.logits[:, 6:20], dim=-1) predicted_logits_location, predicted_ids_location = torch.max(outputs.logits[:, 20:24], dim=-1) expected_labels_action = [0, 0, 2, 3] expected_logits_action = torch.tensor([0.4568, 11.0848, 1.6621, 9.3841], device=torch_device) expected_labels_object = [3, 10, 3, 4] expected_logits_object = torch.tensor([1.5322, 10.7094, 5.2469, 22.1318], device=torch_device) expected_labels_location = [0, 0, 0, 1] expected_logits_location = torch.tensor([1.5335, 6.5096, 10.5704, 11.0569], device=torch_device) self.assertListEqual(predicted_ids_action.tolist(), expected_labels_action) self.assertListEqual(predicted_ids_object.tolist(), expected_labels_object) self.assertListEqual(predicted_ids_location.tolist(), expected_labels_location) self.assertTrue(torch.allclose(predicted_logits_action, expected_logits_action, atol=1e-2)) 
self.assertTrue(torch.allclose(predicted_logits_object, expected_logits_object, atol=1e-2)) self.assertTrue(torch.allclose(predicted_logits_location, expected_logits_location, atol=1e-2)) def test_inference_speaker_identification(self): model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-sid").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-sid") input_data = self._load_superb("si", 4) output_logits = [] with torch.no_grad(): for example in input_data["speech"]: input = processor(example, return_tensors="pt", padding=True) output = model(input.input_values.to(torch_device), attention_mask=None) output_logits.append(output.logits[0]) output_logits = torch.stack(output_logits) predicted_logits, predicted_ids = torch.max(output_logits, dim=-1) expected_labels = [251, 1, 1, 3] # s3prl logits for the same batch expected_logits = torch.tensor([37.5627, 71.6362, 64.2419, 31.7778], device=torch_device) self.assertListEqual(predicted_ids.tolist(), expected_labels) self.assertTrue(torch.allclose(predicted_logits, expected_logits, atol=1e-2)) def test_inference_emotion_recognition(self): model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-er").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-er") input_data = self._load_superb("er", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) predicted_logits, predicted_ids = torch.max(outputs.logits, dim=-1) expected_labels = [1, 1, 2, 2] # s3prl logits for the same batch expected_logits = torch.tensor([2.1722, 3.0779, 8.0287, 6.6797], device=torch_device) self.assertListEqual(predicted_ids.tolist(), expected_labels) self.assertTrue(torch.allclose(predicted_logits, expected_logits, atol=1e-2)) def test_phoneme_recognition(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft").to(torch_device) processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") input_speech = self._load_datasamples(4) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): logits = model(input_values, attention_mask=attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "ɐ m æ n s ɛ d t ə ð ə j uː n ɪ v ɚ s s ɚ aɪ ɛ ɡ z ɪ s t", "s w ɛ t k ʌ v ɚ d b ɹ iː ɔ n z b ɑː d i t ɹ ɪ k l ɪ ŋ ɪ n t ə ð ə t aɪ t l oɪ n k l ɑː θ ð æ w ʌ z ð ɪ oʊ" " n l i ɡ ɑːɹ m ə n t h iː w ɔːɹ", "ð ə k aɪ t ɔ n h ɪ z tʃ ɛ s t s t ɪ l d ɹ ɪ p ɪ ŋ b l ʌ d ð ɪ eɪ k ʌ v h ɪ z oʊ v ɚ s t ɹ eɪ n d aɪ z iː" " v ə n ð ə s ɔːɹ ɹ ɪ ŋ ɐ ɹ iː n ɐ ɚ ɹ aʊ n d h ɪ m w ɪ ð ə θ aʊ z ə n d z ʌ v s p ɛ k t eɪ ɾ ɚ z w ɜː t ɹ" " ɪ v ɪ æ l ᵻ ɾ i z n ɑː t w ɜː θ θ ɪ ŋ k ɪ ŋ ɐ b aʊ t", "h ɪ z ɪ n s t ə n t v p æ n ɪ k w ʌ z f ɑː l oʊ d b aɪ ɐ s m ɔː l ʃ ɑːɹ p b l oʊ h aɪ ɔ n h ɪ z tʃ ɛ s t", ] # should correspond to =>: # [ # "a man said to the universe sir i exist", # "sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore", # "the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring 
arena around him with the thousands of spectators were trivialities not worth thinking about", # "his instant panic was followed by a small sharp blow high on his chest", # ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) @require_pyctcdecode @require_torchaudio @unittest.skipIf( is_torch_less_than_1_9, reason="`torchaudio.functional.resample` needs torchaudio >= 0.9 which requires torch >= 0.9", ) def test_wav2vec2_with_lm(self): ds = load_dataset("common_voice", "es", split="test", streaming=True) sample = next(iter(ds)) resampled_audio = torchaudio.functional.resample( torch.tensor(sample["audio"]["array"]), 48_000, 16_000 ).numpy() model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm").to( torch_device ) processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") input_values = processor(resampled_audio, return_tensors="pt").input_values with torch.no_grad(): logits = model(input_values.to(torch_device)).logits transcription = processor.batch_decode(logits.cpu().numpy()).text self.assertEqual(transcription[0], "bien y qué regalo vas a abrir primero") @require_pyctcdecode @require_torchaudio @unittest.skipIf( is_torch_less_than_1_9, reason="`torchaudio.functional.resample` needs torchaudio >= 0.9 which requires torch >= 0.9", ) def test_wav2vec2_with_lm_pool(self): ds = load_dataset("common_voice", "es", split="test", streaming=True) sample = next(iter(ds)) resampled_audio = torchaudio.functional.resample( torch.tensor(sample["audio"]["array"]), 48_000, 16_000 ).numpy() model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm").to( torch_device ) processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") input_values = processor(resampled_audio, return_tensors="pt").input_values with torch.no_grad(): logits = model(input_values.to(torch_device)).logits # test user-managed pool with multiprocessing.get_context("fork").Pool(2) as pool: transcription = processor.batch_decode(logits.cpu().numpy(), pool).text self.assertEqual(transcription[0], "bien y qué regalo vas a abrir primero") # user-managed pool + num_processes should trigger a warning with CaptureLogger(processing_wav2vec2_with_lm.logger) as cl, multiprocessing.get_context("fork").Pool( 2 ) as pool: transcription = processor.batch_decode(logits.cpu().numpy(), pool, num_processes=2).text self.assertIn("num_process", cl.out) self.assertIn("it will be ignored", cl.out) self.assertEqual(transcription[0], "bien y qué regalo vas a abrir primero") @require_pyctcdecode @require_torchaudio def test_wav2vec2_with_lm_invalid_pool(self): run_test_in_subprocess(test_case=self, target_func=_test_wav2vec2_with_lm_invalid_pool, inputs=None) def test_inference_diarization(self): model = Wav2Vec2ForAudioFrameClassification.from_pretrained("anton-l/wav2vec2-base-superb-sd").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("anton-l/wav2vec2-base-superb-sd") input_data = self._load_superb("sd", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True, sampling_rate=16_000) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) # labels is a one-hot array of shape (num_frames, num_speakers) labels = (outputs.logits > 0).long() # s3prl logits for the same batch expected_logits = 
torch.tensor( [ [[-5.2807, -5.1272], [-5.4059, -4.7757], [-5.2764, -4.9621], [-5.0117, -4.5851]], [[-1.7643, -0.5462], [-1.7369, -0.2649], [-1.5066, -0.6200], [-4.5703, -2.4863]], [[-0.8656, -0.4783], [-0.8899, -0.3289], [-0.9267, -0.5781], [-0.7817, -0.4619]], [[-4.8625, -2.5316], [-5.2339, -2.2155], [-4.9835, -2.0344], [-4.4727, -1.8421]], ], device=torch_device, ) self.assertEqual(labels[0, :, 0].sum(), 555) self.assertEqual(labels[0, :, 1].sum(), 299) # TODO: update the tolerance after the CI moves to torch 1.10 self.assertTrue(torch.allclose(outputs.logits[:, :4], expected_logits, atol=1e-2)) def test_inference_speaker_verification(self): model = Wav2Vec2ForXVector.from_pretrained("anton-l/wav2vec2-base-superb-sv").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("anton-l/wav2vec2-base-superb-sv") input_data = self._load_superb("si", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True, sampling_rate=16_000) labels = torch.tensor([5, 1, 1, 3], device=torch_device).T with torch.no_grad(): input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) outputs = model(input_values, attention_mask=attention_mask, labels=labels) embeddings = torch.nn.functional.normalize(outputs.embeddings, dim=-1).cpu() cosine_sim = torch.nn.CosineSimilarity(dim=-1) # id10002 vs id10002 self.assertAlmostEqual(cosine_sim(embeddings[1], embeddings[2]).numpy(), 0.9758, 3) # id10006 vs id10002 self.assertAlmostEqual(cosine_sim(embeddings[0], embeddings[1]).numpy(), 0.7579, 3) # id10002 vs id10004 self.assertAlmostEqual(cosine_sim(embeddings[2], embeddings[3]).numpy(), 0.7594, 3) # TODO: update the tolerance after the CI moves to torch 1.10 self.assertAlmostEqual(outputs.loss.item(), 17.7963, 2)
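# --- Illustrative aside (not part of the test file above or the one that follows) ---
# The test file below imports `torch_int_div` from `transformers.pytorch_utils` and uses it in
# `test_sample_negatives` / `test_sample_negatives_with_mask` to build integer feature indices, e.g.
# `torch_int_div(torch.arange(sequence_length * hidden_size, device=torch_device), hidden_size)`.
# This is a minimal, hedged sketch of what such a helper does: floor division on tensors routed
# through `torch.div(..., rounding_mode="floor")` instead of the `//` operator, whose tensor
# `__floordiv__` behavior is deprecated on recent PyTorch versions. The helper name
# `int_div_sketch` is an assumption made here for illustration; only `torch.div`, `torch.arange`,
# and plain Python `//` below are standard APIs.

import torch


def int_div_sketch(a: torch.Tensor, b) -> torch.Tensor:
    # Hypothetical stand-in for a `torch_int_div`-style utility: integer floor division that
    # avoids calling `Tensor.__floordiv__` directly. With `rounding_mode="floor"` and integer
    # inputs, `torch.div` returns an integer tensor.
    return torch.div(a, b, rounding_mode="floor")


if __name__ == "__main__":
    sequence_length, hidden_size = 10, 4
    # Same index pattern as in `test_sample_negatives`: every `hidden_size` consecutive
    # positions map to the same feature-vector index (0, 0, 0, 0, 1, 1, 1, 1, ...).
    sequence = int_div_sketch(torch.arange(sequence_length * hidden_size), hidden_size)
    assert sequence.tolist() == [i // hidden_size for i in range(sequence_length * hidden_size)]
# --- end of illustrative aside ---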
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Wav2Vec2 model. """ import math import multiprocessing import os import pickle import tempfile import traceback import unittest import numpy as np from datasets import load_dataset from transformers import Wav2Vec2Config, is_torch_available from transformers.testing_utils import ( CaptureLogger, is_pt_flax_cross_test, is_pyctcdecode_available, is_torchaudio_available, require_pyctcdecode, require_soundfile, require_torch, require_torchaudio, run_test_in_subprocess, slow, torch_device, ) from transformers.utils import is_torch_fx_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ( ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor, random_attention_mask, ) from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( Wav2Vec2FeatureExtractor, Wav2Vec2ForAudioFrameClassification, Wav2Vec2ForCTC, Wav2Vec2ForMaskedLM, Wav2Vec2ForPreTraining, Wav2Vec2ForSequenceClassification, Wav2Vec2ForXVector, Wav2Vec2Model, Wav2Vec2Processor, ) from transformers.models.wav2vec2.modeling_wav2vec2 import ( Wav2Vec2GumbelVectorQuantizer, _compute_mask_indices, _sample_negative_indices, ) from transformers.pytorch_utils import is_torch_less_than_1_9, torch_int_div else: is_torch_less_than_1_9 = True if is_torchaudio_available(): import torchaudio if is_pyctcdecode_available(): import pyctcdecode.decoder from transformers import Wav2Vec2ProcessorWithLM from transformers.models.wav2vec2_with_lm import processing_wav2vec2_with_lm if is_torch_fx_available(): from transformers.utils.fx import symbolic_trace def _test_wav2vec2_with_lm_invalid_pool(in_queue, out_queue, timeout): error = None try: _ = in_queue.get(timeout=timeout) ds = load_dataset("common_voice", "es", split="test", streaming=True) sample = next(iter(ds)) resampled_audio = torchaudio.functional.resample( torch.tensor(sample["audio"]["array"]), 48_000, 16_000 ).numpy() model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm").to( torch_device ) processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") input_values = processor(resampled_audio, return_tensors="pt").input_values with torch.no_grad(): logits = model(input_values.to(torch_device)).logits # use a spawn pool, which should trigger a warning if different than fork with CaptureLogger(pyctcdecode.decoder.logger) as cl, multiprocessing.get_context("spawn").Pool(1) as pool: transcription = processor.batch_decode(logits.cpu().numpy(), pool).text unittest.TestCase().assertIn("Falling back to sequential decoding.", cl.out) unittest.TestCase().assertEqual(transcription[0], "bien y qué regalo vas a abrir primero") # force batch_decode to internally create a spawn pool, which should trigger a warning if different than fork 
multiprocessing.set_start_method("spawn", force=True) with CaptureLogger(processing_wav2vec2_with_lm.logger) as cl: transcription = processor.batch_decode(logits.cpu().numpy()).text unittest.TestCase().assertIn("Falling back to sequential decoding.", cl.out) unittest.TestCase().assertEqual(transcription[0], "bien y qué regalo vas a abrir primero") except Exception: error = f"{traceback.format_exc()}" results = {"error": error} out_queue.put(results, timeout=timeout) out_queue.join() class Wav2Vec2ModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, # speech is longer is_training=False, hidden_size=16, feat_extract_norm="group", feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, num_hidden_layers=4, num_attention_heads=2, hidden_dropout_prob=0.1, # this is most likely not correctly set yet intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, mask_time_prob=0.5, mask_time_length=2, vocab_size=32, do_stable_layer_norm=False, num_adapter_layers=1, adapter_stride=2, tdnn_dim=(32, 32), tdnn_kernel=(5, 3), tdnn_dilation=(1, 2), xvector_output_dim=32, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_dropout = feat_extract_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.num_adapter_layers = num_adapter_layers self.adapter_stride = adapter_stride self.mask_time_prob = mask_time_prob self.mask_time_length = mask_time_length self.scope = scope self.tdnn_dim = tdnn_dim self.tdnn_kernel = tdnn_kernel self.tdnn_dilation = tdnn_dilation self.xvector_output_dim = xvector_output_dim output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = int(math.ceil(output_seq_length)) self.encoder_seq_length = self.output_seq_length self.adapter_output_seq_length = (self.output_seq_length - 1) // adapter_stride + 1 def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.seq_length], scale=1.0) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = self.get_config() return config, input_values, attention_mask def get_config(self): return Wav2Vec2Config( hidden_size=self.hidden_size, feat_extract_norm=self.feat_extract_norm, feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, mask_time_prob=self.mask_time_prob, mask_time_length=self.mask_time_length, 
num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout_prob=self.hidden_dropout_prob, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, do_stable_layer_norm=self.do_stable_layer_norm, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, num_adapter_layers=self.num_adapter_layers, adapter_stride=self.adapter_stride, tdnn_dim=self.tdnn_dim, tdnn_kernel=self.tdnn_kernel, tdnn_dilation=self.tdnn_dilation, xvector_output_dim=self.xvector_output_dim, ) def create_and_check_model(self, config, input_values, attention_mask): model = Wav2Vec2Model(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_model_with_adapter(self, config, input_values, attention_mask): config.add_adapter = True model = Wav2Vec2Model(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, self.hidden_size) ) def create_and_check_model_with_adapter_for_ctc(self, config, input_values, attention_mask): config.add_adapter = True config.output_hidden_size = 2 * config.hidden_size model = Wav2Vec2ForCTC(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.adapter_output_seq_length, self.vocab_size) ) def create_and_check_model_with_adapter_proj_dim(self, config, input_values, attention_mask): config.add_adapter = True config.output_hidden_size = 8 model = Wav2Vec2Model(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.adapter_output_seq_length, config.output_hidden_size), ) def create_and_check_batch_inference(self, config, input_values, *args): # test does not pass for models making use of `group_norm` # check: https://github.com/pytorch/fairseq/issues/3227 model = Wav2Vec2Model(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0.0 batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state for i in range(input_values.shape[0]): input_slice = input_values[i : i + 1, : input_lengths[i]] output = model(input_slice).last_hidden_state batch_output = batch_outputs[i : i + 1, : output.shape[1]] self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3)) def check_ctc_loss(self, config, input_values, *args): model = Wav2Vec2ForCTC(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = 
model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 model.config.ctc_loss_reduction = "sum" sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() model.config.ctc_loss_reduction = "mean" mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() self.parent.assertTrue(isinstance(sum_loss, float)) self.parent.assertTrue(isinstance(mean_loss, float)) def check_seq_classifier_loss(self, config, input_values, *args): model = Wav2Vec2ForSequenceClassification(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0 masked_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item() unmasked_loss = model(input_values, labels=labels).loss.item() self.parent.assertTrue(isinstance(masked_loss, float)) self.parent.assertTrue(isinstance(unmasked_loss, float)) self.parent.assertTrue(masked_loss != unmasked_loss) def check_ctc_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = Wav2Vec2ForCTC(config=config) model.to(torch_device) model.train() # freeze feature encoder model.freeze_feature_encoder() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 if max_length_labels[i] < labels.shape[-1]: # it's important that we make sure that target lenghts are at least # one shorter than logit lenghts to prevent -inf labels[i, max_length_labels[i] - 1 :] = -100 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_seq_classifier_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = Wav2Vec2ForSequenceClassification(config=config) model.to(torch_device) model.train() # freeze everything but the classification head model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_xvector_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = Wav2Vec2ForXVector(config=config) model.to(torch_device) model.train() # freeze everything but the classification head model.freeze_base_model() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] labels = ids_tensor((input_values.shape[0], 1), len(model.config.id2label)) # 
pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def check_labels_out_of_vocab(self, config, input_values, *args): model = Wav2Vec2ForCTC(config) model.to(torch_device) model.train() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size + 100) with self.parent.assertRaises(ValueError): model(input_values, labels=labels) def prepare_config_and_inputs_for_common(self): config, input_values, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_torch class Wav2Vec2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (Wav2Vec2ForCTC, Wav2Vec2Model, Wav2Vec2ForMaskedLM, Wav2Vec2ForSequenceClassification, Wav2Vec2ForPreTraining) if is_torch_available() else () ) pipeline_model_mapping = ( { "audio-classification": Wav2Vec2ForSequenceClassification, "automatic-speech-recognition": Wav2Vec2ForCTC, "feature-extraction": Wav2Vec2Model, "fill-mask": Wav2Vec2ForMaskedLM, } if is_torch_available() else {} ) fx_compatible = True test_pruning = False test_headmasking = False def setUp(self): self.model_tester = Wav2Vec2ModelTester(self) self.config_tester = ConfigTester(self, config_class=Wav2Vec2Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_adapter(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter(*config_and_inputs) def test_model_with_adapter_for_ctc(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter_for_ctc(*config_and_inputs) def test_model_with_adapter_proj_dim(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter_proj_dim(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_xvector_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_xvector_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) # Wav2Vec2 has no inputs_embeds def test_inputs_embeds(self): pass # `input_ids` is renamed to `input_values` def test_forward_signature(self): pass # Wav2Vec2 
cannot resize token embeddings # since it has no tokens embeddings def test_resize_tokens_embeddings(self): pass # Wav2Vec2 has no inputs_embeds # and thus the `get_input_embeddings` fn # is not implemented def test_model_common_attributes(self): pass @is_pt_flax_cross_test # non-robust architecture does not exist in Flax def test_equivalence_flax_to_pt(self): pass @is_pt_flax_cross_test # non-robust architecture does not exist in Flax def test_equivalence_pt_to_flax(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) # set layer drop to 0 model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "feature_projection.projection.weight", "feature_projection.projection.bias", "objective.weight", ] if param.requires_grad: if any([x in name for x in uniform_init_parms]): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "codevectors") and module.codevectors is not None: module.codevectors.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) def test_mask_feature_prob_ctc(self): model = Wav2Vec2ForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", mask_feature_prob=0.2, mask_feature_length=2 ) model.to(torch_device).train() processor = 
Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_prob_ctc(self): model = Wav2Vec2ForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", mask_time_prob=0.2, mask_time_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h") self.assertIsNotNone(model) # Wav2Vec2 cannot be torchscripted because of group norm. def _create_and_check_torch_fx_tracing(self, config, inputs_dict, output_loss=False): if not is_torch_fx_available() or not self.fx_compatible: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.return_dict = False for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=output_loss) try: input_names = [ "attention_mask", "bbox", "input_features", "input_ids", "input_values", "pixel_values", "token_type_ids", "visual_feats", "visual_pos", ] labels = inputs.get("labels", None) start_positions = inputs.get("start_positions", None) end_positions = inputs.get("end_positions", None) if labels is not None: input_names.append("labels") if start_positions is not None: input_names.append("start_positions") if end_positions is not None: input_names.append("end_positions") filtered_inputs = {k: v for (k, v) in inputs.items() if k in input_names} input_names = list(filtered_inputs.keys()) model_output = model(**filtered_inputs) if ( isinstance(model, Wav2Vec2ForSequenceClassification) and not hasattr(model.config, "problem_type") or model.config.problem_type is None ): model.config.problem_type = "single_label_classification" traced_model = symbolic_trace(model, input_names) traced_output = traced_model(**filtered_inputs) except Exception as e: self.fail(f"Couldn't trace module: {e}") def flatten_output(output): flatten = [] for x in output: if isinstance(x, (tuple, list)): flatten += flatten_output(x) elif not isinstance(x, torch.Tensor): continue else: flatten.append(x) return flatten model_output = flatten_output(model_output) traced_output = flatten_output(traced_output) num_outputs = len(model_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], traced_output[i]), f"traced {i}th output doesn't match model {i}th output for 
{model_class}", ) # Test that the model can be serialized and restored properly with tempfile.TemporaryDirectory() as tmp_dir_name: pkl_file_name = os.path.join(tmp_dir_name, "model.pkl") try: with open(pkl_file_name, "wb") as f: pickle.dump(traced_model, f) with open(pkl_file_name, "rb") as f: loaded = pickle.load(f) except Exception as e: self.fail(f"Couldn't serialize / deserialize the traced model: {e}") loaded_output = loaded(**filtered_inputs) loaded_output = flatten_output(loaded_output) for i in range(num_outputs): self.assertTrue( torch.allclose(model_output[i], loaded_output[i]), f"serialized model {i}th output doesn't match model {i}th output for {model_class}", ) # Avoid memory leak. Without this, each call increase RAM usage by ~20MB. # (Even with this call, there are still memory leak by ~0.04MB) self.clear_torch_jit_class_registry() @require_torch class Wav2Vec2RobustModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = ( ( Wav2Vec2ForCTC, Wav2Vec2Model, Wav2Vec2ForMaskedLM, Wav2Vec2ForSequenceClassification, Wav2Vec2ForPreTraining, Wav2Vec2ForAudioFrameClassification, Wav2Vec2ForXVector, ) if is_torch_available() else () ) test_pruning = False test_headmasking = False def setUp(self): self.model_tester = Wav2Vec2ModelTester( self, conv_stride=(3, 3, 3), feat_extract_norm="layer", do_stable_layer_norm=True ) self.config_tester = ConfigTester(self, config_class=Wav2Vec2Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_with_adapter(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter(*config_and_inputs) def test_model_with_adapter_proj_dim(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_with_adapter_proj_dim(*config_and_inputs) def test_batched_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_batch_inference(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_seq_classifier_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_loss(*config_and_inputs) def test_ctc_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_training(*config_and_inputs) def test_seq_classifier_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_seq_classifier_training(*config_and_inputs) def test_xvector_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_xvector_training(*config_and_inputs) def test_labels_out_of_vocab(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_labels_out_of_vocab(*config_and_inputs) # Wav2Vec2 has no inputs_embeds def test_inputs_embeds(self): pass # `input_ids` is renamed to `input_values` def test_forward_signature(self): pass # Wav2Vec2 cannot resize token embeddings # since it has no tokens embeddings def test_resize_tokens_embeddings(self): pass # Wav2Vec2 has no inputs_embeds # and thus the `get_input_embeddings` fn # is not implemented def 
test_model_common_attributes(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) # set layer drop to 0 model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): uniform_init_parms = [ "conv.weight", "masked_spec_embed", "codevectors", "quantizer.weight_proj.weight", "project_hid.weight", "project_hid.bias", "project_q.weight", "project_q.bias", "feature_projection.projection.weight", "feature_projection.projection.bias", "objective.weight", ] if param.requires_grad: if any([x in name for x in uniform_init_parms]): self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) # overwrite from test_modeling_common def _mock_init_weights(self, module): if hasattr(module, "weight") and module.weight is not None: module.weight.data.fill_(3) if hasattr(module, "weight_g") and module.weight_g is not None: module.weight_g.data.fill_(3) if hasattr(module, "weight_v") and module.weight_v is not None: module.weight_v.data.fill_(3) if hasattr(module, "bias") and module.bias is not None: module.bias.data.fill_(3) if hasattr(module, "codevectors") and module.codevectors is not None: module.codevectors.data.fill_(3) if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None: module.masked_spec_embed.data.fill_(3) def test_model_for_pretraining(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() model = Wav2Vec2ForPreTraining(config).to(torch_device) batch_size = inputs_dict["input_values"].shape[0] feature_seq_length = int(model._get_feat_extract_output_lengths(inputs_dict["input_values"].shape[1])) features_shape = (batch_size, feature_seq_length) mask_time_indices = _compute_mask_indices( features_shape, model.config.mask_time_prob, model.config.mask_time_length, min_masks=2, ) sampled_negative_indices = _sample_negative_indices(features_shape, 10, mask_time_indices) mask_time_indices = 
torch.from_numpy(mask_time_indices).to(torch_device) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) loss = model( inputs_dict["input_values"], attention_mask=inputs_dict["attention_mask"], mask_time_indices=mask_time_indices, sampled_negative_indices=sampled_negative_indices, ).loss # more losses mask_time_indices[:, : mask_time_indices.shape[-1] // 2] = True sampled_negative_indices = _sample_negative_indices(features_shape, 10, mask_time_indices.cpu().numpy()) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) loss_more_masked = model( inputs_dict["input_values"], attention_mask=inputs_dict["attention_mask"], mask_time_indices=mask_time_indices, sampled_negative_indices=sampled_negative_indices, ).loss # loss_more_masked has to be bigger or equal loss since more masked inputs have to be predicted self.assertTrue(loss.detach().item() <= loss_more_masked.detach().item()) def test_mask_feature_prob_ctc(self): model = Wav2Vec2ForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", mask_feature_prob=0.2, mask_feature_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_prob_ctc(self): model = Wav2Vec2ForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", mask_time_prob=0.2, mask_time_length=2 ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) batch_duration_in_seconds = [1, 3, 2, 6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (4, 1498, 32)) def test_mask_time_feature_prob_ctc_single_batch(self): model = Wav2Vec2ForCTC.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", mask_time_prob=0.2, mask_feature_prob=0.2, mask_time_length=2, mask_feature_length=2, ) model.to(torch_device).train() processor = Wav2Vec2Processor.from_pretrained( "hf-internal-testing/tiny-random-wav2vec2", return_attention_mask=True ) batch_duration_in_seconds = [6] input_features = [np.random.random(16_000 * s) for s in batch_duration_in_seconds] batch = processor( input_features, padding=True, sampling_rate=processor.feature_extractor.sampling_rate, return_tensors="pt" ) logits = model( input_values=batch["input_values"].to(torch_device), attention_mask=batch["attention_mask"].to(torch_device), ).logits self.assertEqual(logits.shape, (1, 1498, 32)) @unittest.skip(reason="Feed forward chunking is not implemented") def test_feed_forward_chunking(self): pass @slow def test_model_from_pretrained(self): model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h") self.assertIsNotNone(model) @require_torch class 
Wav2Vec2UtilsTest(unittest.TestCase): def test_compute_mask_indices(self): batch_size = 4 sequence_length = 60 mask_prob = 0.5 mask_length = 1 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)]) def test_compute_mask_indices_low_prob(self): # with these settings num_masked_spans=0.5, which means probabilistic rounding # ensures that in 5 out of 10 method calls, num_masked_spans=0, and in # the other 5 out of 10, cases num_masked_spans=1 n_trials = 100 batch_size = 4 sequence_length = 100 mask_prob = 0.05 mask_length = 10 count_dimensions_masked = 0 count_dimensions_not_masked = 0 for _ in range(n_trials): mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) num_masks = torch.sum(mask).item() if num_masks > 0: count_dimensions_masked += 1 else: count_dimensions_not_masked += 1 # as we test for at least 10 masked dimension and at least # 10 non-masked dimension, this test could fail with probability: # P(100 coin flips, at most 9 heads) = 1.66e-18 self.assertGreater(count_dimensions_masked, int(n_trials * 0.1)) self.assertGreater(count_dimensions_not_masked, int(n_trials * 0.1)) def test_compute_mask_indices_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) mask = torch.from_numpy(mask).to(torch_device) # because of overlap mask don't have to add up exactly to `mask_prob * sequence_length`, but have to be smaller or equal for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) def test_compute_mask_indices_attn_mask_overlap(self): batch_size = 4 sequence_length = 80 mask_prob = 0.5 mask_length = 4 attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device) attention_mask[:2, sequence_length // 2 :] = 0 mask = _compute_mask_indices( (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask ) mask = torch.from_numpy(mask).to(torch_device) for batch_sum in mask.sum(axis=-1): self.assertTrue(int(batch_sum) <= mask_prob * sequence_length) self.assertTrue(mask[:2, sequence_length // 2 :].sum() == 0) def test_compute_mask_indices_short_audio(self): batch_size = 4 sequence_length = 100 mask_prob = 0.05 mask_length = 10 attention_mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device) # force one example to be heavily padded attention_mask[0, 5:] = 0 mask = _compute_mask_indices( (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask, min_masks=2 ) # make sure that non-padded examples cannot be padded self.assertFalse(mask[0][attention_mask[0].to(torch.bool).cpu()].any()) def test_compute_perplexity(self): probs = torch.arange(100, device=torch_device).reshape(2, 5, 10) / 100 ppl = Wav2Vec2GumbelVectorQuantizer._compute_perplexity(probs) self.assertTrue(abs(ppl.item() - 141.4291) < 1e-3) # mask half of the input mask = torch.ones((2,), device=torch_device, dtype=torch.bool) mask[0] = 0 ppl = Wav2Vec2GumbelVectorQuantizer._compute_perplexity(probs, mask) self.assertTrue(abs(ppl.item() - 58.6757) < 1e-3) def test_sample_negatives(self): batch_size = 2 sequence_length = 10 hidden_size = 4 num_negatives = 3 sequence = torch_int_div(torch.arange(sequence_length * 
hidden_size, device=torch_device), hidden_size) features = sequence.view(sequence_length, hidden_size) # each value in vector consits of same value features = features[None, :].expand(batch_size, sequence_length, hidden_size).contiguous() # sample negative indices sampled_negative_indices = _sample_negative_indices((batch_size, sequence_length), num_negatives, None) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) negatives = features.view(-1, hidden_size)[sampled_negative_indices.long().view(-1)] negatives = negatives.view(batch_size, sequence_length, -1, hidden_size).permute(2, 0, 1, 3) self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size)) # make sure no negatively sampled vector is actually a positive one for negative in negatives: self.assertTrue(((negative - features) == 0).sum() == 0.0) # make sure that full vectors are sampled and not values of vectors => this means that `unique()` yields a single value for `hidden_size` dim self.assertEqual(negatives.unique(dim=-1).shape, (num_negatives, batch_size, sequence_length, 1)) def test_sample_negatives_with_mask(self): batch_size = 2 sequence_length = 10 hidden_size = 4 num_negatives = 3 # second half of last input tensor is padded mask = torch.ones((batch_size, sequence_length), dtype=torch.long, device=torch_device) mask[-1, sequence_length // 2 :] = 0 sequence = torch_int_div(torch.arange(sequence_length * hidden_size, device=torch_device), hidden_size) features = sequence.view(sequence_length, hidden_size) # each value in vector consits of same value features = features[None, :].expand(batch_size, sequence_length, hidden_size).contiguous() # replace masked feature vectors with -100 to test that those are not sampled features = torch.where(mask[:, :, None].expand(features.shape).bool(), features, -100) # sample negative indices sampled_negative_indices = _sample_negative_indices( (batch_size, sequence_length), num_negatives, mask.cpu().numpy() ) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) negatives = features.view(-1, hidden_size)[sampled_negative_indices.long().view(-1)] negatives = negatives.view(batch_size, sequence_length, -1, hidden_size).permute(2, 0, 1, 3) self.assertTrue((negatives >= 0).all().item()) self.assertTrue(negatives.shape == (num_negatives, batch_size, sequence_length, hidden_size)) # make sure no negatively sampled vector is actually a positive one for negative in negatives: self.assertTrue(((negative - features) == 0).sum() == 0.0) # make sure that full vectors are sampled and not values of vectors => this means that `unique()` yields a single value for `hidden_size` dim self.assertEqual(negatives.unique(dim=-1).shape, (num_negatives, batch_size, sequence_length, 1)) @require_torch @require_soundfile @slow class Wav2Vec2ModelIntegrationTest(unittest.TestCase): def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").filter( lambda x: x["id"] in [f"1272-141231-000{i}" for i in range(num_samples)] )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def _load_superb(self, task, num_samples): ds = load_dataset("anton-l/superb_dummy", task, split="test") return ds[:num_samples] def test_inference_ctc_normal(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") model.to(torch_device) processor = 
Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h", do_lower_case=True) input_speech = self._load_datasamples(1) input_values = processor(input_speech, return_tensors="pt").input_values.to(torch_device) with torch.no_grad(): logits = model(input_values).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = ["a man said to the universe sir i exist"] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_ctc_normal_batched(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") model.to(torch_device) processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h", do_lower_case=True) input_speech = self._load_datasamples(2) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) with torch.no_grad(): logits = model(input_values).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight lowing cloth that was the only garment he wore", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_ctc_robust_batched(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self").to(torch_device) processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h-lv60-self", do_lower_case=True) input_speech = self._load_datasamples(4) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): logits = model(input_values, attention_mask=attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore", "the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around" " him with the thousands of spectators were trivialities not worth thinking about", "his instant panic was followed by a small sharp blow high on his chest", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) @unittest.skipIf(torch_device != "cpu", "cannot make deterministic on GPU") def test_inference_integration(self): model = Wav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-base") model.to(torch_device) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("facebook/wav2vec2-base") input_speech = self._load_datasamples(2) inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True) batch_size = inputs_dict["input_values"].shape[0] feature_seq_length = int(model._get_feat_extract_output_lengths(inputs_dict["input_values"].shape[1])) features_shape = (batch_size, feature_seq_length) np.random.seed(4) mask_time_indices = _compute_mask_indices( features_shape, model.config.mask_time_prob, model.config.mask_time_length, min_masks=2, ) mask_time_indices = torch.from_numpy(mask_time_indices).to(torch_device) with torch.no_grad(): outputs = model( inputs_dict.input_values.to(torch_device), mask_time_indices=mask_time_indices, ) # compute cosine similarity cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, 
dim=-1) # retrieve cosine sim of masked features cosine_sim_masked = cosine_sim[mask_time_indices] # cosine similarity of model is all > 0.5 as model is # pre-trained on contrastive loss # fmt: off expected_cosine_sim_masked = torch.tensor([ 0.8523, 0.5860, 0.6905, 0.5557, 0.7456, 0.5249, 0.6639, 0.7654, 0.7565, 0.8167, 0.8222, 0.7960, 0.8034, 0.8166, 0.8310, 0.8263, 0.8274, 0.8258, 0.8179, 0.8412, 0.8536, 0.5098, 0.4728, 0.6461, 0.4498, 0.6002, 0.5774, 0.6457, 0.7123, 0.5668, 0.6866, 0.4960, 0.6293, 0.7423, 0.7419, 0.7526, 0.7768, 0.4898, 0.5393, 0.8183 ], device=torch_device) # fmt: on self.assertTrue(torch.allclose(cosine_sim_masked, expected_cosine_sim_masked, atol=1e-3)) def test_inference_pretrained(self): model = Wav2Vec2ForPreTraining.from_pretrained("facebook/wav2vec2-base") model.to(torch_device) feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "facebook/wav2vec2-base", return_attention_mask=True ) input_speech = self._load_datasamples(2) inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True) batch_size = inputs_dict["input_values"].shape[0] feature_seq_length = int(model._get_feat_extract_output_lengths(inputs_dict["input_values"].shape[1])) features_shape = (batch_size, feature_seq_length) torch.manual_seed(0) mask_time_indices = _compute_mask_indices( features_shape, model.config.mask_time_prob, model.config.mask_time_length, min_masks=2, ) mask_time_indices = torch.from_numpy(mask_time_indices).to(torch_device) with torch.no_grad(): outputs = model( inputs_dict.input_values.to(torch_device), attention_mask=inputs_dict.attention_mask.to(torch_device), mask_time_indices=mask_time_indices, ) # compute cosine similarity cosine_sim = torch.cosine_similarity(outputs.projected_states, outputs.projected_quantized_states, dim=-1) # retrieve cosine sim of masked features cosine_sim_masked = cosine_sim[mask_time_indices] # ... 
now compare to randomly initialized model config = Wav2Vec2Config.from_pretrained("facebook/wav2vec2-base") model_rand = Wav2Vec2ForPreTraining(config).to(torch_device).eval() with torch.no_grad(): outputs_rand = model_rand( inputs_dict.input_values.to(torch_device), attention_mask=inputs_dict.attention_mask.to(torch_device), mask_time_indices=mask_time_indices, ) # compute cosine similarity cosine_sim_rand = torch.cosine_similarity( outputs_rand.projected_states, outputs_rand.projected_quantized_states, dim=-1 ) # retrieve cosine sim of masked features cosine_sim_masked_rand = cosine_sim_rand[mask_time_indices] # a pretrained wav2vec2 model has learned to predict the quantized latent states # => the cosine similarity between quantized states and predicted states > 0.5 # a random wav2vec2 model has not learned to predict the quantized latent states # => the cosine similarity between quantized states and predicted states is very likely < 0.1 self.assertTrue(cosine_sim_masked.mean().item() - 5 * cosine_sim_masked_rand.mean().item() > 0) @unittest.skipIf(torch_device != "cpu", "cannot make deterministic on GPU") def test_loss_pretraining(self): model = Wav2Vec2ForPreTraining.from_pretrained( "facebook/wav2vec2-base", attention_dropout=0.0, feat_proj_dropout=0.0, hidden_dropout=0.0, layerdrop=0.0, ) model.to(torch_device).train() feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained( "facebook/wav2vec2-base", return_attention_mask=True ) input_speech = self._load_datasamples(2) inputs_dict = feature_extractor(input_speech, return_tensors="pt", padding=True) batch_size = inputs_dict["input_values"].shape[0] feature_seq_length = int(model._get_feat_extract_output_lengths(inputs_dict["input_values"].shape[1])) features_shape = (batch_size, feature_seq_length) torch.manual_seed(0) np.random.seed(0) mask_time_indices = _compute_mask_indices( features_shape, model.config.mask_time_prob, model.config.mask_time_length, min_masks=2, ) sampled_negative_indices = _sample_negative_indices( mask_time_indices.shape, model.config.num_negatives, mask_time_indices ) mask_time_indices = torch.from_numpy(mask_time_indices).to(torch_device) sampled_negative_indices = torch.from_numpy(sampled_negative_indices).to(torch_device) with torch.no_grad(): outputs = model( inputs_dict.input_values.to(torch_device), attention_mask=inputs_dict.attention_mask.to(torch_device), mask_time_indices=mask_time_indices, sampled_negative_indices=sampled_negative_indices, ) # check diversity loss num_codevectors = model.config.num_codevectors_per_group * model.config.num_codevector_groups diversity_loss = (num_codevectors - outputs.codevector_perplexity) / num_codevectors self.assertTrue(abs(diversity_loss.item() - 0.9538) < 1e-3) # check overall loss (contrastive loss + diversity loss) expected_loss = 116.7094 self.assertTrue(abs(outputs.loss.item() - expected_loss) < 1e-3) def test_inference_keyword_spotting(self): model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-ks").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-ks") input_data = self._load_superb("ks", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) predicted_logits, predicted_ids = torch.max(outputs.logits, dim=-1) expected_labels = [7, 6, 10, 9] # s3prl logits for 
the same batch expected_logits = torch.tensor([6.1186, 11.8961, 10.2931, 6.0898], device=torch_device) self.assertListEqual(predicted_ids.tolist(), expected_labels) self.assertTrue(torch.allclose(predicted_logits, expected_logits, atol=1e-2)) def test_inference_intent_classification(self): model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-ic").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-ic") input_data = self._load_superb("ic", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) predicted_logits_action, predicted_ids_action = torch.max(outputs.logits[:, :6], dim=-1) predicted_logits_object, predicted_ids_object = torch.max(outputs.logits[:, 6:20], dim=-1) predicted_logits_location, predicted_ids_location = torch.max(outputs.logits[:, 20:24], dim=-1) expected_labels_action = [0, 0, 2, 3] expected_logits_action = torch.tensor([0.4568, 11.0848, 1.6621, 9.3841], device=torch_device) expected_labels_object = [3, 10, 3, 4] expected_logits_object = torch.tensor([1.5322, 10.7094, 5.2469, 22.1318], device=torch_device) expected_labels_location = [0, 0, 0, 1] expected_logits_location = torch.tensor([1.5335, 6.5096, 10.5704, 11.0569], device=torch_device) self.assertListEqual(predicted_ids_action.tolist(), expected_labels_action) self.assertListEqual(predicted_ids_object.tolist(), expected_labels_object) self.assertListEqual(predicted_ids_location.tolist(), expected_labels_location) self.assertTrue(torch.allclose(predicted_logits_action, expected_logits_action, atol=1e-2)) self.assertTrue(torch.allclose(predicted_logits_object, expected_logits_object, atol=1e-2)) self.assertTrue(torch.allclose(predicted_logits_location, expected_logits_location, atol=1e-2)) def test_inference_speaker_identification(self): model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-sid").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-sid") input_data = self._load_superb("si", 4) output_logits = [] with torch.no_grad(): for example in input_data["speech"]: input = processor(example, return_tensors="pt", padding=True) output = model(input.input_values.to(torch_device), attention_mask=None) output_logits.append(output.logits[0]) output_logits = torch.stack(output_logits) predicted_logits, predicted_ids = torch.max(output_logits, dim=-1) expected_labels = [251, 1, 1, 3] # s3prl logits for the same batch expected_logits = torch.tensor([37.5627, 71.6362, 64.2419, 31.7778], device=torch_device) self.assertListEqual(predicted_ids.tolist(), expected_labels) self.assertTrue(torch.allclose(predicted_logits, expected_logits, atol=1e-2)) def test_inference_emotion_recognition(self): model = Wav2Vec2ForSequenceClassification.from_pretrained("superb/wav2vec2-base-superb-er").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("superb/wav2vec2-base-superb-er") input_data = self._load_superb("er", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) predicted_logits, predicted_ids = torch.max(outputs.logits, dim=-1) 
expected_labels = [1, 1, 2, 2] # s3prl logits for the same batch expected_logits = torch.tensor([2.1722, 3.0779, 8.0287, 6.6797], device=torch_device) self.assertListEqual(predicted_ids.tolist(), expected_labels) self.assertTrue(torch.allclose(predicted_logits, expected_logits, atol=1e-2)) def test_phoneme_recognition(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft").to(torch_device) processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-lv-60-espeak-cv-ft") input_speech = self._load_datasamples(4) inputs = processor(input_speech, return_tensors="pt", padding=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): logits = model(input_values, attention_mask=attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "ɐ m æ n s ɛ d t ə ð ə j uː n ɪ v ɚ s s ɚ aɪ ɛ ɡ z ɪ s t", "s w ɛ t k ʌ v ɚ d b ɹ iː ɔ n z b ɑː d i t ɹ ɪ k l ɪ ŋ ɪ n t ə ð ə t aɪ t l oɪ n k l ɑː θ ð æ w ʌ z ð ɪ oʊ" " n l i ɡ ɑːɹ m ə n t h iː w ɔːɹ", "ð ə k aɪ t ɔ n h ɪ z tʃ ɛ s t s t ɪ l d ɹ ɪ p ɪ ŋ b l ʌ d ð ɪ eɪ k ʌ v h ɪ z oʊ v ɚ s t ɹ eɪ n d aɪ z iː" " v ə n ð ə s ɔːɹ ɹ ɪ ŋ ɐ ɹ iː n ɐ ɚ ɹ aʊ n d h ɪ m w ɪ ð ə θ aʊ z ə n d z ʌ v s p ɛ k t eɪ ɾ ɚ z w ɜː t ɹ" " ɪ v ɪ æ l ᵻ ɾ i z n ɑː t w ɜː θ θ ɪ ŋ k ɪ ŋ ɐ b aʊ t", "h ɪ z ɪ n s t ə n t v p æ n ɪ k w ʌ z f ɑː l oʊ d b aɪ ɐ s m ɔː l ʃ ɑːɹ p b l oʊ h aɪ ɔ n h ɪ z tʃ ɛ s t", ] # should correspond to =>: # [ # "a man said to the universe sir i exist", # "sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore", # "the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around him with the thousands of spectators were trivialities not worth thinking about", # "his instant panic was followed by a small sharp blow high on his chest", # ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) @require_pyctcdecode @require_torchaudio @unittest.skipIf( is_torch_less_than_1_9, reason="`torchaudio.functional.resample` needs torchaudio >= 0.9 which requires torch >= 0.9", ) def test_wav2vec2_with_lm(self): ds = load_dataset("common_voice", "es", split="test", streaming=True) sample = next(iter(ds)) resampled_audio = torchaudio.functional.resample( torch.tensor(sample["audio"]["array"]), 48_000, 16_000 ).numpy() model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm").to( torch_device ) processor = Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") input_values = processor(resampled_audio, return_tensors="pt").input_values with torch.no_grad(): logits = model(input_values.to(torch_device)).logits transcription = processor.batch_decode(logits.cpu().numpy()).text self.assertEqual(transcription[0], "bien y qué regalo vas a abrir primero") @require_pyctcdecode @require_torchaudio @unittest.skipIf( is_torch_less_than_1_9, reason="`torchaudio.functional.resample` needs torchaudio >= 0.9 which requires torch >= 0.9", ) def test_wav2vec2_with_lm_pool(self): ds = load_dataset("common_voice", "es", split="test", streaming=True) sample = next(iter(ds)) resampled_audio = torchaudio.functional.resample( torch.tensor(sample["audio"]["array"]), 48_000, 16_000 ).numpy() model = Wav2Vec2ForCTC.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm").to( torch_device ) processor = 
Wav2Vec2ProcessorWithLM.from_pretrained("patrickvonplaten/wav2vec2-large-xlsr-53-spanish-with-lm") input_values = processor(resampled_audio, return_tensors="pt").input_values with torch.no_grad(): logits = model(input_values.to(torch_device)).logits # test user-managed pool with multiprocessing.get_context("fork").Pool(2) as pool: transcription = processor.batch_decode(logits.cpu().numpy(), pool).text self.assertEqual(transcription[0], "bien y qué regalo vas a abrir primero") # user-managed pool + num_processes should trigger a warning with CaptureLogger(processing_wav2vec2_with_lm.logger) as cl, multiprocessing.get_context("fork").Pool( 2 ) as pool: transcription = processor.batch_decode(logits.cpu().numpy(), pool, num_processes=2).text self.assertIn("num_process", cl.out) self.assertIn("it will be ignored", cl.out) self.assertEqual(transcription[0], "bien y qué regalo vas a abrir primero") @require_pyctcdecode @require_torchaudio def test_wav2vec2_with_lm_invalid_pool(self): run_test_in_subprocess(test_case=self, target_func=_test_wav2vec2_with_lm_invalid_pool, inputs=None) def test_inference_diarization(self): model = Wav2Vec2ForAudioFrameClassification.from_pretrained("anton-l/wav2vec2-base-superb-sd").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("anton-l/wav2vec2-base-superb-sd") input_data = self._load_superb("sd", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True, sampling_rate=16_000) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): outputs = model(input_values, attention_mask=attention_mask) # labels is a one-hot array of shape (num_frames, num_speakers) labels = (outputs.logits > 0).long() # s3prl logits for the same batch expected_logits = torch.tensor( [ [[-5.2807, -5.1272], [-5.4059, -4.7757], [-5.2764, -4.9621], [-5.0117, -4.5851]], [[-1.7643, -0.5462], [-1.7369, -0.2649], [-1.5066, -0.6200], [-4.5703, -2.4863]], [[-0.8656, -0.4783], [-0.8899, -0.3289], [-0.9267, -0.5781], [-0.7817, -0.4619]], [[-4.8625, -2.5316], [-5.2339, -2.2155], [-4.9835, -2.0344], [-4.4727, -1.8421]], ], device=torch_device, ) self.assertEqual(labels[0, :, 0].sum(), 555) self.assertEqual(labels[0, :, 1].sum(), 299) # TODO: update the tolerance after the CI moves to torch 1.10 self.assertTrue(torch.allclose(outputs.logits[:, :4], expected_logits, atol=1e-2)) def test_inference_speaker_verification(self): model = Wav2Vec2ForXVector.from_pretrained("anton-l/wav2vec2-base-superb-sv").to(torch_device) processor = Wav2Vec2FeatureExtractor.from_pretrained("anton-l/wav2vec2-base-superb-sv") input_data = self._load_superb("si", 4) inputs = processor(input_data["speech"], return_tensors="pt", padding=True, sampling_rate=16_000) labels = torch.tensor([5, 1, 1, 3], device=torch_device).T with torch.no_grad(): input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) outputs = model(input_values, attention_mask=attention_mask, labels=labels) embeddings = torch.nn.functional.normalize(outputs.embeddings, dim=-1).cpu() cosine_sim = torch.nn.CosineSimilarity(dim=-1) # id10002 vs id10002 self.assertAlmostEqual(cosine_sim(embeddings[1], embeddings[2]).numpy(), 0.9758, 3) # id10006 vs id10002 self.assertAlmostEqual(cosine_sim(embeddings[0], embeddings[1]).numpy(), 0.7579, 3) # id10002 vs id10004 self.assertAlmostEqual(cosine_sim(embeddings[2], embeddings[3]).numpy(), 0.7594, 3) # TODO: update the tolerance after the CI moves to torch 1.10 
self.assertAlmostEqual(outputs.loss.item(), 17.7963, 2)
1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div` instead.
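For illustration, a minimal sketch of the kind of migration this PR describes; the tensor names `lengths` and `stride` are hypothetical and not taken from the patched files:

```python
import torch

lengths = torch.tensor([17, 32, 49])
stride = 4

# Deprecated pattern: tensor floor division written with "//" goes through
# Tensor.__floordiv__, which emits a deprecation warning on recent PyTorch.
# positions = lengths // stride

# Replacement: explicit floor division with torch.div.
positions = torch.div(lengths, stride, rounding_mode="floor")
print(positions)  # tensor([ 4,  8, 12])
```

`rounding_mode="floor"` matches Python's `//` semantics, while `"trunc"` truncates toward zero; for the non-negative quantities typically divided in these models (sequence lengths, strides, head counts) the two agree.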
./src/transformers/models/lxmert/modeling_tf_lxmert.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors, The HuggingFace Inc. team, and the # Lxmert Authors. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 LXMERT model.""" import warnings from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import numpy as np import tensorflow as tf from transformers.tf_utils import stable_softmax from ...activations_tf import get_tf_activation from ...modeling_tf_utils import ( TFModelInputType, TFPreTrainedModel, get_initializer, keras_serializable, shape_list, unpack_inputs, ) from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_lxmert import LxmertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "unc-nlp/lxmert-base-uncased" _CONFIG_FOR_DOC = "LxmertConfig" TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "unc-nlp/lxmert-base-uncased", ] @dataclass class TFLxmertModelOutput(ModelOutput): """ Lxmert's outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language, visual, and, cross-modality encoders. (note: the visual encoder in Lxmert is referred to as the "relation-ship" encoder") Args: language_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the language encoder. vision_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the visual encoder. pooled_output (`tf.Tensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed by a Linear layer and a Tanh activation function. The Linear language_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. language_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ language_output: Optional[tf.Tensor] = None vision_output: Optional[tf.Tensor] = None pooled_output: Optional[tf.Tensor] = None language_hidden_states: Optional[Tuple[tf.Tensor]] = None vision_hidden_states: Optional[Tuple[tf.Tensor]] = None language_attentions: Optional[Tuple[tf.Tensor]] = None vision_attentions: Optional[Tuple[tf.Tensor]] = None cross_encoder_attentions: Optional[Tuple[tf.Tensor]] = None @dataclass class TFLxmertForPreTrainingOutput(ModelOutput): """ Output type of [`LxmertForPreTraining`]. Args: loss (*optional*, returned when `labels` is provided, `tf.Tensor` of shape `(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). cross_relationship_score: (`tf.Tensor` of shape `(batch_size, 2)`): Prediction scores of the textual matching objective (classification) head (scores of True/False continuation before SoftMax). question_answering_score: (`tf.Tensor` of shape `(batch_size, n_qa_answers)`): Prediction scores of question answering objective (classification). language_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. language_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
cross_encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[tf.Tensor] = None prediction_logits: Optional[tf.Tensor] = None cross_relationship_score: Optional[tf.Tensor] = None question_answering_score: Optional[tf.Tensor] = None language_hidden_states: Optional[Tuple[tf.Tensor]] = None vision_hidden_states: Optional[Tuple[tf.Tensor]] = None language_attentions: Optional[Tuple[tf.Tensor]] = None vision_attentions: Optional[Tuple[tf.Tensor]] = None cross_encoder_attentions: Optional[Tuple[tf.Tensor]] = None class TFLxmertVisualFeatureEncoder(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) # Object feature encoding self.visn_fc = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="visn_fc", ) self.visn_layer_norm = tf.keras.layers.LayerNormalization( epsilon=config.layer_norm_eps, name="visn_layer_norm" ) # Box position encoding self.box_fc = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="box_fc", ) self.box_layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="box_layer_norm") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def call(self, visn_input, training=False): feats, boxes = visn_input x = self.visn_fc(feats) x = self.visn_layer_norm(x) y = self.box_fc(boxes) y = self.box_layer_norm(y) output = (x + y) / 2 output = self.dropout(output, training=training) return output class TFLxmertEmbeddings(tf.keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.hidden_size = config.hidden_size self.max_position_embeddings = config.max_position_embeddings self.initializer_range = config.initializer_range self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def build(self, input_shape): with tf.name_scope("word_embeddings"): self.weight = self.add_weight( name="weight", shape=[self.config.vocab_size, self.hidden_size], initializer=get_initializer(initializer_range=self.initializer_range), ) with tf.name_scope("token_type_embeddings"): self.token_type_embeddings = self.add_weight( name="embeddings", shape=[self.config.type_vocab_size, self.hidden_size], initializer=get_initializer(initializer_range=self.initializer_range), ) with tf.name_scope("position_embeddings"): self.position_embeddings = self.add_weight( name="embeddings", shape=[self.max_position_embeddings, self.hidden_size], initializer=get_initializer(initializer_range=self.initializer_range), ) super().build(input_shape) def call(self, input_ids=None, token_type_ids=None, inputs_embeds=None, training=False): """ Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor. """ assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound # indices on GPU, returning zeros instead. 
This is a dangerous silent behavior. tf.debugging.assert_less( input_ids, tf.cast(self.config.vocab_size, dtype=input_ids.dtype), message=( "input_ids must be smaller than the embedding layer's input dimension (got" f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" ), ) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) final_embeddings = inputs_embeds + position_embeds + token_type_embeds final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings class TFLxmertAttention(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads}" ) self.num_attention_heads = config.num_attention_heads assert config.hidden_size % config.num_attention_heads == 0 self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = tf.keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query", ) self.key = tf.keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key", ) self.value = tf.keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value", ) self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x, batch_size): # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size)) return tf.transpose(x, perm=[0, 2, 1, 3]) def call(self, hidden_states, context, attention_mask, output_attentions, training=False): batch_size = shape_list(hidden_states)[0] mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(context) mixed_value_layer = self.value(context) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = tf.matmul( query_layer, key_layer, transpose_b=True ) # (batch size, num_heads, seq_len_q, seq_len_k) dk = tf.cast(shape_list(key_layer)[-1], dtype=attention_scores.dtype) # scale attention_scores attention_scores = attention_scores / tf.math.sqrt(dk) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TFLxmertModel call() function) attention_mask = tf.cast(attention_mask, dtype=attention_scores.dtype) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
attention_probs = stable_softmax(attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs, training=training) context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) context_layer = tf.reshape( context_layer, (batch_size, -1, self.all_head_size) ) # (batch_size, seq_len_q, all_head_size) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class TFLxmertIntermediate(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class TFLxmertOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class TFLxmertAttentionOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class TFLxmertSelfAttentionLayer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.self = TFLxmertAttention(config, name="self") self.attention_output = TFLxmertAttentionOutput(config, name="output") def call(self, input_tensor, attention_mask, output_attentions, training=False): # Self attention attends to itself, thus keys and queries are the same (input_tensor). 
self_output = self.self(input_tensor, input_tensor, attention_mask, output_attentions) if output_attentions: attention_probs = self_output[1] attention_output = self.attention_output(self_output[0], input_tensor) return (attention_output, attention_probs) if output_attentions else (attention_output,) class TFLxmertCrossAttentionLayer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.att = TFLxmertAttention(config, name="att") self.attention_output = TFLxmertAttentionOutput(config, name="output") def call( self, input_tensor, ctx_tensor, ctx_att_mask, output_attentions=False, training=False, ): output = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions, training=training) if output_attentions: attention_probs = output[1] attention_output = self.attention_output(output[0], input_tensor, training=training) outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) return outputs class TFLxmertLayer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.attention = TFLxmertSelfAttentionLayer(config, name="attention") self.intermediate = TFLxmertIntermediate(config, name="intermediate") self.transformer_output = TFLxmertOutput(config, name="output") def call(self, hidden_states, attention_mask, output_attentions, training=False): attention_outputs = self.attention(hidden_states, attention_mask, output_attentions, training=training) attention_output = attention_outputs[0] intermediate_output = self.intermediate(attention_output) layer_output = self.transformer_output(intermediate_output, attention_output, training=training) outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them return outputs class TFLxmertXLayer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.visual_attention = TFLxmertCrossAttentionLayer(config, name="visual_attention") # Self-attention Layers self.lang_self_att = TFLxmertSelfAttentionLayer(config, name="lang_self_att") self.visn_self_att = TFLxmertSelfAttentionLayer(config, name="visn_self_att") # Intermediate and Output Layers (FFNs) self.lang_inter = TFLxmertIntermediate(config, name="lang_inter") self.lang_output = TFLxmertOutput(config, name="lang_output") self.visn_inter = TFLxmertIntermediate(config, name="visn_inter") self.visn_output = TFLxmertOutput(config, name="visn_output") def cross_att( self, lang_input, lang_attention_mask, visn_input, visn_attention_mask, output_attentions, training=False, ): # Cross Attention # Keras saving and loading model *does not work* with the same inputs for two layers. 
lang_attention_lang_input = tf.identity(lang_input) visn_attention_lang_input = tf.identity(lang_input) lang_attention_visn_input = tf.identity(visn_input) visn_attention_visn_input = tf.identity(visn_input) lang_att_output = self.visual_attention( lang_attention_lang_input, lang_attention_visn_input, visn_attention_mask, output_attentions=output_attentions, training=training, ) visn_att_output = self.visual_attention( visn_attention_visn_input, visn_attention_lang_input, lang_attention_mask, output_attentions=output_attentions, training=training, ) return lang_att_output, visn_att_output def self_att( self, lang_input, lang_attention_mask, visn_input, visn_attention_mask, training=False, ): # Self Attention output_attentions = False lang_att_output = self.lang_self_att(lang_input, lang_attention_mask, output_attentions, training=training) visn_att_output = self.visn_self_att(visn_input, visn_attention_mask, output_attentions, training=training) return lang_att_output[0], visn_att_output[0] def output_fc(self, lang_input, visn_input, training=False): # FC layers lang_inter_output = self.lang_inter(lang_input) visn_inter_output = self.visn_inter(visn_input) # Layer output lang_output = self.lang_output(lang_inter_output, lang_input, training) visn_output = self.visn_output(visn_inter_output, visn_input, training) return lang_output, visn_output def call( self, lang_feats, lang_attention_mask, visn_feats, visn_attention_mask, output_attentions, training=False, ): lang_att_output = lang_feats visn_att_output = visn_feats lang_att_output, visn_att_output = self.cross_att( lang_att_output, lang_attention_mask, visn_att_output, visn_attention_mask, output_attentions, training=training, ) attention_probs = lang_att_output[1:] lang_att_output, visn_att_output = self.self_att( lang_att_output[0], lang_attention_mask, visn_att_output[0], visn_attention_mask, training=training, ) lang_output, visn_output = self.output_fc(lang_att_output, visn_att_output, training=training) return (lang_output, visn_output, attention_probs[0]) if output_attentions else (lang_output, visn_output) class TFLxmertEncoder(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.visn_fc = TFLxmertVisualFeatureEncoder(config, name="visn_fc") # Number of layers self.num_l_layers = config.l_layers self.num_x_layers = config.x_layers self.num_r_layers = config.r_layers # Layers # Using self.layer instead of self.l_layer to support loading BERT weights. 
self.layer = [TFLxmertLayer(config, name=f"layer_._{i}") for i in range(self.num_l_layers)] self.x_layers = [TFLxmertXLayer(config, name=f"x_layers_._{i}") for i in range(self.num_x_layers)] self.r_layers = [TFLxmertLayer(config, name=f"r_layers_._{i}") for i in range(self.num_r_layers)] self.config = config def call( self, lang_feats=None, lang_attention_mask=None, visual_feats=None, visual_pos=None, visual_attention_mask=None, output_attentions=None, training=False, ): vision_hidden_states = () language_hidden_states = () vision_attentions = () if output_attentions or self.config.output_attentions else None language_attentions = () if output_attentions or self.config.output_attentions else None cross_encoder_attentions = () if output_attentions or self.config.output_attentions else None visual_feats = self.visn_fc([visual_feats, visual_pos], training=training) # Run language layers for layer_module in self.layer: l_outputs = layer_module(lang_feats, lang_attention_mask, output_attentions, training=training) lang_feats = l_outputs[0] language_hidden_states = language_hidden_states + (lang_feats,) if language_attentions is not None: language_attentions = language_attentions + (l_outputs[1],) # Run relational layers for layer_module in self.r_layers: v_outputs = layer_module( visual_feats, visual_attention_mask, output_attentions, training=training, ) visual_feats = v_outputs[0] vision_hidden_states = vision_hidden_states + (visual_feats,) if vision_attentions is not None: vision_attentions = vision_attentions + (v_outputs[1],) # Run cross-modality layers for layer_module in self.x_layers: x_outputs = layer_module( lang_feats, lang_attention_mask, visual_feats, visual_attention_mask, output_attentions, training=training, ) lang_feats, visual_feats = x_outputs[:2] vision_hidden_states = vision_hidden_states + (visual_feats,) language_hidden_states = language_hidden_states + (lang_feats,) if cross_encoder_attentions is not None: cross_encoder_attentions = cross_encoder_attentions + (x_outputs[2],) visual_encoder_outputs = ( vision_hidden_states, vision_attentions if output_attentions else None, ) lang_encoder_outputs = ( language_hidden_states, language_attentions if output_attentions else None, ) return ( visual_encoder_outputs, lang_encoder_outputs, cross_encoder_attentions if output_attentions else None, ) @keras_serializable class TFLxmertMainLayer(tf.keras.layers.Layer): config_class = LxmertConfig @property def dummy_inputs(self): """ Dummy inputs to build the network. 
Returns: tf.Tensor with dummy inputs """ batch_size = 2 num_visual_features = 10 input_ids = tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32) visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim)) visual_pos = tf.random.uniform((batch_size, num_visual_features, 4)) return { "input_ids": input_ids, "visual_feats": visual_feats, "visual_pos": visual_pos, } def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.num_l_layers = config.l_layers self.num_x_layers = config.x_layers self.num_r_layers = config.r_layers self.initializer_range = config.initializer_range self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.return_dict = config.use_return_dict self.embeddings = TFLxmertEmbeddings(config, name="embeddings") self.encoder = TFLxmertEncoder(config, name="encoder") self.pooler = TFLxmertPooler(config, name="pooler") self.config = config def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, value): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): raise NotImplementedError @unpack_inputs def call( self, input_ids=None, visual_feats=None, visual_pos=None, attention_mask=None, visual_attention_mask=None, token_type_ids=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if visual_pos is None or visual_feats is None: raise ValueError("visual_feats and visual_pos cannot be `None` in LXMERT's `call` method.") if attention_mask is None: attention_mask = tf.fill(input_shape, 1) if token_type_ids is None: token_type_ids = tf.fill(input_shape, 0) # Positional Word Embeddings embedding_output = self.embeddings(input_ids, token_type_ids, inputs_embeds, training) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1])) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) one_cst = tf.constant(1.0, dtype=embedding_output.dtype) ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) if visual_attention_mask is not None: extended_visual_attention_mask = tf.reshape(visual_attention_mask, (input_shape[0], 1, 1, input_shape[1])) extended_visual_attention_mask = tf.expand_dims(tf.expand_dims(visual_attention_mask, axis=1), axis=1) extended_visual_attention_mask = tf.cast(extended_visual_attention_mask, dtype=embedding_output.dtype) extended_visual_attention_mask = tf.multiply( tf.subtract(one_cst, extended_visual_attention_mask), ten_thousand_cst ) else: extended_visual_attention_mask = None # Run Lxmert encoder encoder_outputs = self.encoder( embedding_output, extended_attention_mask, visual_feats, visual_pos, extended_visual_attention_mask, output_attentions, training, ) visual_encoder_outputs, lang_encoder_outputs = encoder_outputs[:2] vision_hidden_states = visual_encoder_outputs[0] language_hidden_states = lang_encoder_outputs[0] all_attentions = () if output_attentions: language_attentions = lang_encoder_outputs[1] vision_attentions = visual_encoder_outputs[1] cross_encoder_attentions = encoder_outputs[2] all_attentions = ( language_attentions, vision_attentions, cross_encoder_attentions, ) hidden_states = (language_hidden_states, vision_hidden_states) if output_hidden_states else () visual_output = vision_hidden_states[-1] lang_output = language_hidden_states[-1] pooled_output = self.pooler(lang_output) if not return_dict: return (lang_output, visual_output, pooled_output) + hidden_states + all_attentions return TFLxmertModelOutput( pooled_output=pooled_output, language_output=lang_output, vision_output=visual_output, language_hidden_states=language_hidden_states if output_hidden_states else None, vision_hidden_states=vision_hidden_states if output_hidden_states else None, language_attentions=language_attentions if output_attentions else None, vision_attentions=vision_attentions if output_attentions else None, cross_encoder_attentions=cross_encoder_attentions if output_attentions else None, ) class TFLxmertPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = LxmertConfig base_model_prefix = "lxmert" @property def dummy_inputs(self) -> Dict[str, tf.Tensor]: return getattr(self, self.base_model_prefix).dummy_inputs @tf.function( input_signature=[ { "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), "visual_feats": tf.TensorSpec((None, None, None), tf.float32, name="visual_feats"), "visual_pos": tf.TensorSpec((None, None, None), tf.float32, name="visual_pos"), "visual_attention_mask": tf.TensorSpec((None, None), tf.int32, name="visual_attention_mask"), "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"), } ] ) def serving(self, inputs): output = self.call(inputs) return self.serving_output(output) LXMERT_START_DOCSTRING = r""" The LXMERT model was proposed in [LXMERT: Learning Cross-Modality Encoder Representations from Transformers](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal. 
It's a vision and language transformer model, pre-trained on a variety of multi-modal datasets comprising of GQA, VQAv2.0, MCSCOCO captions, and Visual genome, using a combination of masked language modeling, region of interest feature regression, cross entropy loss for question answering attribute prediction, and object tag prediction. This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Parameters: config ([`LxmertConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ LXMERT_INPUTS_DOCSTRING = r""" Args: input_ids (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) visual_feats: (`tf.Tensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`): This input represents visual features. They ROI pooled object features from bounding boxes using a faster-RCNN model) These are currently not provided by the transformers library. visual_pos: (`tf.Tensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`): This input represents spacial features corresponding to their relative (via index) visual features. The pre-trained LXMERT model expects these spacial features to be normalized bounding boxes on a scale of 0 to 1. These are currently not provided by the transformers library. attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) visual_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): MMask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). 
""" @add_start_docstrings( "The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top.", LXMERT_START_DOCSTRING, ) class TFLxmertModel(TFLxmertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.lxmert = TFLxmertMainLayer(config, name="lxmert") @unpack_inputs @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFLxmertModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: Optional[TFModelInputType] = None, visual_feats: Optional[tf.Tensor] = None, visual_pos: Optional[tf.Tensor] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, visual_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[Tuple, TFLxmertModelOutput]: outputs = self.lxmert( input_ids, visual_feats, visual_pos, attention_mask, visual_attention_mask, token_type_ids, inputs_embeds, output_attentions, output_hidden_states, return_dict, training, ) return outputs def serving_output(self, output): l_hs = tf.convert_to_tensor(output.language_hidden_states) if self.config.output_hidden_states else None v_hs = tf.convert_to_tensor(output.vision_hidden_states) if self.config.output_hidden_states else None l_attns = tf.convert_to_tensor(output.language_attentions) if self.config.output_attentions else None v_attns = tf.convert_to_tensor(output.vision_attentions) if self.config.output_attentions else None c_enc_attns = tf.convert_to_tensor(output.cross_encoder_attentions) if self.config.output_attentions else None return TFLxmertModelOutput( pooled_output=output.pooled_output, language_output=output.language_output, vision_output=output.vision_output, language_hidden_states=l_hs, vision_hidden_states=v_hs, language_attentions=l_attns, vision_attentions=v_attns, cross_encoder_attentions=c_enc_attns, ) class TFLxmertPooler(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) def call(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) return pooled_output # Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->Lxmert class TFLxmertPredictionHeadTransform(tf.keras.layers.Layer): def __init__(self, config: LxmertConfig, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) if isinstance(config.hidden_act, str): self.transform_act_fn = get_tf_activation(config.hidden_act) else: self.transform_act_fn = config.hidden_act self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(inputs=hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMPredictionHead with Bert->Lxmert class TFLxmertLMPredictionHead(tf.keras.layers.Layer): def __init__(self, config: LxmertConfig, input_embeddings: tf.keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.config = config self.hidden_size = config.hidden_size self.transform = TFLxmertPredictionHeadTransform(config, name="transform") # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.input_embeddings = input_embeddings def build(self, input_shape: tf.TensorShape): self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias") super().build(input_shape) def get_output_embeddings(self) -> tf.keras.layers.Layer: return self.input_embeddings def set_output_embeddings(self, value: tf.Variable): self.input_embeddings.weight = value self.input_embeddings.vocab_size = shape_list(value)[0] def get_bias(self) -> Dict[str, tf.Variable]: return {"bias": self.bias} def set_bias(self, value: tf.Variable): self.bias = value["bias"] self.config.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.transform(hidden_states=hidden_states) seq_length = shape_list(hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size]) hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) return hidden_states # Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->Lxmert class TFLxmertMLMHead(tf.keras.layers.Layer): def __init__(self, config: LxmertConfig, input_embeddings: tf.keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions") def call(self, sequence_output: tf.Tensor) -> tf.Tensor: prediction_scores = self.predictions(hidden_states=sequence_output) return prediction_scores class TFLxmertPreTrainingHeads(tf.keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions") self.seq_relationship = tf.keras.layers.Dense( 2, kernel_initializer=get_initializer(config.initializer_range), name="seq_relationship", ) def call(self, sequence_output, 
pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class TFLxmertVisualAnswerHead(tf.keras.layers.Layer): def __init__(self, config, num_labels, **kwargs): super().__init__(**kwargs) hid_dim = config.hidden_size self.dense = tf.keras.layers.Dense( hid_dim * 2, kernel_initializer=get_initializer(config.initializer_range), name="logit_fc_._0", ) self.activation = get_tf_activation("gelu") self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="logit_fc_._2") self.dense_1 = tf.keras.layers.Dense( num_labels, kernel_initializer=get_initializer(config.initializer_range), name="logit_fc_._3", ) def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.dense_1(hidden_states) return hidden_states class TFLxmertVisualObjHead(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.transform = TFLxmertPredictionHeadTransform(config, name="transform") # Decide the use of visual losses visual_losses = {} if config.visual_obj_loss: visual_losses["obj"] = {"shape": (-1,), "num": config.num_object_labels} if config.visual_attr_loss: visual_losses["attr"] = {"shape": (-1,), "num": config.num_attr_labels} if config.visual_feat_loss: visual_losses["feat"] = {"shape": (-1, 2048), "num": config.visual_feat_dim} self.visual_losses = visual_losses # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder_dict = { key: tf.keras.layers.Dense( self.visual_losses[key]["num"], kernel_initializer=get_initializer(config.initializer_range), name=f"decoder_dict.{key}", ) for key in self.visual_losses } def call(self, hidden_states): hidden_states = self.transform(hidden_states) output = {} for key in self.visual_losses: output[key] = self.decoder_dict[key](hidden_states) return output @add_start_docstrings("""Lxmert Model with a `language modeling` head on top.""", LXMERT_START_DOCSTRING) class TFLxmertForPreTraining(TFLxmertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.config = config self.num_qa_labels = config.num_qa_labels self.visual_loss_normalizer = config.visual_loss_normalizer # Use of pretraining tasks self.task_mask_lm = config.task_mask_lm self.task_obj_predict = config.task_obj_predict self.task_matched = config.task_matched self.task_qa = config.task_qa # Lxmert backbone self.lxmert = TFLxmertMainLayer(config, name="lxmert") # Pre-training heads self.cls = TFLxmertPreTrainingHeads(config, self.lxmert.embeddings, name="cls") if self.task_obj_predict: self.obj_predict_head = TFLxmertVisualObjHead(config, name="obj_predict_head") if self.task_qa: self.answer_head = TFLxmertVisualAnswerHead(config, self.num_qa_labels, name="answer_head") # Loss functions self.loss_fcts = { "l2": tf.keras.losses.Huber(delta=1.0, name="huber_loss"), "visn_ce": tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), "ce": tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), } visual_losses = {} if config.visual_obj_loss: visual_losses["obj"] = { "shape": (-1,), "num": config.num_object_labels, "loss": "visn_ce", } if config.visual_attr_loss: visual_losses["attr"] = { "shape": (-1,), "num": config.num_attr_labels, "loss": "visn_ce", } if 
config.visual_feat_loss: visual_losses["feat"] = { "shape": (-1, config.visual_feat_dim), "num": config.visual_feat_dim, "loss": "l2", } self.visual_losses = visual_losses @property def dummy_inputs(self): """ Dummy inputs to build the network. Returns: tf.Tensor with dummy inputs """ batch_size = 2 num_visual_features = 10 input_ids = tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32) visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim)) visual_pos = tf.random.uniform((batch_size, num_visual_features, 4)) if self.config.task_obj_predict: obj_labels = {} if self.config.visual_attr_loss and self.config.task_obj_predict: obj_labels["attr"] = ( tf.ones([batch_size, num_visual_features]), tf.ones([batch_size, num_visual_features]), ) if self.config.visual_feat_loss and self.config.task_obj_predict: obj_labels["feat"] = ( tf.ones([batch_size, num_visual_features, self.config.visual_feat_dim]), tf.ones([batch_size, num_visual_features]), ) if self.config.visual_obj_loss and self.config.task_obj_predict: obj_labels["obj"] = ( tf.ones([batch_size, num_visual_features]), tf.ones([batch_size, num_visual_features]), ) return { **{ "input_ids": input_ids, "visual_feats": visual_feats, "visual_pos": visual_pos, }, **({"obj_labels": obj_labels} if self.config.task_obj_predict else {}), } def get_lm_head(self): return self.cls.predictions def get_prefix_bias_name(self): warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return self.name + "/" + self.cls.name + "/" + self.cls.predictions.name @unpack_inputs @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFLxmertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids=None, visual_feats=None, visual_pos=None, attention_mask=None, visual_attention_mask=None, token_type_ids=None, inputs_embeds=None, masked_lm_labels=None, obj_labels=None, matched_label=None, ans=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): r""" masked_lm_labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` obj_labels: (`Dict[Str: Tuple[tf.Tensor, tf.Tensor]]`, *optional*, defaults to `None`): each key is named after each one of the visual losses and each element of the tuple is of the shape `(batch_size, num_features)` and `(batch_size, num_features, visual_feature_dim)` for each the label id and the label score respectively matched_label (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the whether or not the text input matches the image (classification) loss. Input should be a sequence pair (see `input_ids` docstring) Indices should be in `[0, 1]`: - 0 indicates that the sentence does not match the image, - 1 indicates that the sentence does match the image. 
ans (`Torch.Tensor` of shape `(batch_size)`, *optional*, defaults to `None`): a one hot representation hof the correct answer *optional* Returns: """ lxmert_output = self.lxmert( input_ids, visual_feats, visual_pos, attention_mask, visual_attention_mask, token_type_ids, inputs_embeds, output_attentions, output_hidden_states, return_dict, training, ) lang_output, visual_output, pooled_output = ( lxmert_output[0], lxmert_output[1], lxmert_output[2], ) lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output) if self.task_qa: answer_score = self.answer_head(pooled_output) else: answer_score = pooled_output[0][0] total_loss = ( None if (masked_lm_labels is None and matched_label is None and obj_labels is None and ans is None) else tf.constant(0.0) ) losses = () if masked_lm_labels is not None and self.task_mask_lm: masked_lm_loss = self.loss_fcts["ce"]( tf.reshape(masked_lm_labels, [-1]), tf.reshape(lang_prediction_scores, [-1, self.config.vocab_size]), ) total_loss += masked_lm_loss losses += (masked_lm_loss,) if matched_label is not None and self.task_matched: matched_loss = self.loss_fcts["ce"]( tf.reshape(matched_label, [-1]), tf.reshape(cross_relationship_score, [-1, 2]), ) total_loss += matched_loss losses += (matched_loss,) if obj_labels is not None and self.task_obj_predict: total_visn_loss = 0.0 visn_prediction_scores_dict = self.obj_predict_head(visual_output) for key, key_info in self.visual_losses.items(): label, mask_conf = obj_labels[key] output_dim = key_info["num"] loss_fct_name = key_info["loss"] label_shape = key_info["shape"] weight = self.visual_loss_normalizer visn_loss_fct = self.loss_fcts[loss_fct_name] visn_prediction_scores = visn_prediction_scores_dict[key] visn_loss = visn_loss_fct( tf.reshape(label, label_shape), tf.reshape(visn_prediction_scores, [-1, output_dim]), ) if visn_loss.ndim > 1: # Regression Losses visn_loss = tf.reduce_mean(visn_loss) visn_loss = tf.reduce_mean(visn_loss * tf.cast(tf.reshape(mask_conf, [-1]), visn_loss.dtype)) * weight total_visn_loss += visn_loss losses += (visn_loss,) total_loss += total_visn_loss if ans is not None and self.task_qa: answer_loss = self.loss_fcts["ce"]( tf.reshape(ans, [-1]), tf.reshape(answer_score, [-1, self.num_qa_labels]) ) # exclude "*2" here to match the effect of QA losses. # Previous: (loss *0) for 6 epochs, (loss *2) for 6 epochs. 
(Used 10 instead of 6 in EMNLP paper) # Now : (loss *1) for 12 epochs # # * 2 # Multiply by 2 because > half of the data will not have label total_loss += answer_loss losses += (answer_loss,) # return total_loss, tf.stack(losses)[tf.new_axis, ...], answer_score.detach() if not return_dict: output = ( lang_prediction_scores, cross_relationship_score, answer_score, ) + lxmert_output[3:] return ((total_loss,) + output) if total_loss is not None else output return TFLxmertForPreTrainingOutput( loss=total_loss, prediction_logits=lang_prediction_scores, cross_relationship_score=cross_relationship_score, question_answering_score=answer_score, language_hidden_states=lxmert_output.language_hidden_states, vision_hidden_states=lxmert_output.vision_hidden_states, language_attentions=lxmert_output.language_attentions, vision_attentions=lxmert_output.vision_attentions, cross_encoder_attentions=lxmert_output.cross_encoder_attentions, ) def serving_output(self, output): l_hs = tf.convert_to_tensor(output.language_hidden_states) if self.config.output_hidden_states else None v_hs = tf.convert_to_tensor(output.vision_hidden_states) if self.config.output_hidden_states else None l_attns = tf.convert_to_tensor(output.language_attentions) if self.config.output_attentions else None v_attns = tf.convert_to_tensor(output.vision_attentions) if self.config.output_attentions else None c_enc_attns = tf.convert_to_tensor(output.cross_encoder_attentions) if self.config.output_attentions else None return TFLxmertForPreTrainingOutput( prediction_logits=output.prediction_logits, cross_relationship_score=output.cross_relationship_score, question_answering_score=output.question_answering_score, language_hidden_states=l_hs, vision_hidden_states=v_hs, language_attentions=l_attns, vision_attentions=v_attns, cross_encoder_attentions=c_enc_attns, )
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors, The HuggingFace Inc. team, and the # Lxmert Authors. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 LXMERT model.""" import warnings from dataclasses import dataclass from typing import Dict, Optional, Tuple, Union import numpy as np import tensorflow as tf from transformers.tf_utils import stable_softmax from ...activations_tf import get_tf_activation from ...modeling_tf_utils import ( TFModelInputType, TFPreTrainedModel, get_initializer, keras_serializable, shape_list, unpack_inputs, ) from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_lxmert import LxmertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "unc-nlp/lxmert-base-uncased" _CONFIG_FOR_DOC = "LxmertConfig" TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "unc-nlp/lxmert-base-uncased", ] @dataclass class TFLxmertModelOutput(ModelOutput): """ Lxmert's outputs that contain the last hidden states, pooled outputs, and attention probabilities for the language, visual, and, cross-modality encoders. (note: the visual encoder in Lxmert is referred to as the "relation-ship" encoder") Args: language_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the language encoder. vision_output (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the visual encoder. pooled_output (`tf.Tensor` of shape `(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification, CLS, token) further processed by a Linear layer and a Tanh activation function. The Linear language_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. language_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. cross_encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ language_output: Optional[tf.Tensor] = None vision_output: Optional[tf.Tensor] = None pooled_output: Optional[tf.Tensor] = None language_hidden_states: Optional[Tuple[tf.Tensor]] = None vision_hidden_states: Optional[Tuple[tf.Tensor]] = None language_attentions: Optional[Tuple[tf.Tensor]] = None vision_attentions: Optional[Tuple[tf.Tensor]] = None cross_encoder_attentions: Optional[Tuple[tf.Tensor]] = None @dataclass class TFLxmertForPreTrainingOutput(ModelOutput): """ Output type of [`LxmertForPreTraining`]. Args: loss (*optional*, returned when `labels` is provided, `tf.Tensor` of shape `(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). cross_relationship_score: (`tf.Tensor` of shape `(batch_size, 2)`): Prediction scores of the textual matching objective (classification) head (scores of True/False continuation before SoftMax). question_answering_score: (`tf.Tensor` of shape `(batch_size, n_qa_answers)`): Prediction scores of question answering objective (classification). language_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. vision_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for input features + one for the output of each cross-modality layer) of shape `(batch_size, sequence_length, hidden_size)`. language_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. vision_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
cross_encoder_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[tf.Tensor] = None prediction_logits: Optional[tf.Tensor] = None cross_relationship_score: Optional[tf.Tensor] = None question_answering_score: Optional[tf.Tensor] = None language_hidden_states: Optional[Tuple[tf.Tensor]] = None vision_hidden_states: Optional[Tuple[tf.Tensor]] = None language_attentions: Optional[Tuple[tf.Tensor]] = None vision_attentions: Optional[Tuple[tf.Tensor]] = None cross_encoder_attentions: Optional[Tuple[tf.Tensor]] = None class TFLxmertVisualFeatureEncoder(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) # Object feature encoding self.visn_fc = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="visn_fc", ) self.visn_layer_norm = tf.keras.layers.LayerNormalization( epsilon=config.layer_norm_eps, name="visn_layer_norm" ) # Box position encoding self.box_fc = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="box_fc", ) self.box_layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="box_layer_norm") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def call(self, visn_input, training=False): feats, boxes = visn_input x = self.visn_fc(feats) x = self.visn_layer_norm(x) y = self.box_fc(boxes) y = self.box_layer_norm(y) output = (x + y) / 2 output = self.dropout(output, training=training) return output class TFLxmertEmbeddings(tf.keras.layers.Layer): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.hidden_size = config.hidden_size self.max_position_embeddings = config.max_position_embeddings self.initializer_range = config.initializer_range self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob) def build(self, input_shape): with tf.name_scope("word_embeddings"): self.weight = self.add_weight( name="weight", shape=[self.config.vocab_size, self.hidden_size], initializer=get_initializer(initializer_range=self.initializer_range), ) with tf.name_scope("token_type_embeddings"): self.token_type_embeddings = self.add_weight( name="embeddings", shape=[self.config.type_vocab_size, self.hidden_size], initializer=get_initializer(initializer_range=self.initializer_range), ) with tf.name_scope("position_embeddings"): self.position_embeddings = self.add_weight( name="embeddings", shape=[self.max_position_embeddings, self.hidden_size], initializer=get_initializer(initializer_range=self.initializer_range), ) super().build(input_shape) def call(self, input_ids=None, token_type_ids=None, inputs_embeds=None, training=False): """ Applies embedding based on inputs tensor. Returns: final_embeddings (`tf.Tensor`): output embedding tensor. """ assert not (input_ids is None and inputs_embeds is None) if input_ids is not None: # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound # indices on GPU, returning zeros instead. 
This is a dangerous silent behavior. tf.debugging.assert_less( input_ids, tf.cast(self.config.vocab_size, dtype=input_ids.dtype), message=( "input_ids must be smaller than the embedding layer's input dimension (got" f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" ), ) inputs_embeds = tf.gather(params=self.weight, indices=input_ids) input_shape = shape_list(inputs_embeds)[:-1] if token_type_ids is None: token_type_ids = tf.fill(dims=input_shape, value=0) position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0) position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids) token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids) final_embeddings = inputs_embeds + position_embeds + token_type_embeds final_embeddings = self.LayerNorm(inputs=final_embeddings) final_embeddings = self.dropout(inputs=final_embeddings, training=training) return final_embeddings class TFLxmertAttention(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) if config.hidden_size % config.num_attention_heads != 0: raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads}" ) self.num_attention_heads = config.num_attention_heads assert config.hidden_size % config.num_attention_heads == 0 self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = tf.keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query", ) self.key = tf.keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key", ) self.value = tf.keras.layers.Dense( self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value", ) self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x, batch_size): # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size] x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size)) return tf.transpose(x, perm=[0, 2, 1, 3]) def call(self, hidden_states, context, attention_mask, output_attentions, training=False): batch_size = shape_list(hidden_states)[0] mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(context) mixed_value_layer = self.value(context) query_layer = self.transpose_for_scores(mixed_query_layer, batch_size) key_layer = self.transpose_for_scores(mixed_key_layer, batch_size) value_layer = self.transpose_for_scores(mixed_value_layer, batch_size) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = tf.matmul( query_layer, key_layer, transpose_b=True ) # (batch size, num_heads, seq_len_q, seq_len_k) dk = tf.cast(shape_list(key_layer)[-1], dtype=attention_scores.dtype) # scale attention_scores attention_scores = attention_scores / tf.math.sqrt(dk) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TFLxmertModel call() function) attention_mask = tf.cast(attention_mask, dtype=attention_scores.dtype) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
attention_probs = stable_softmax(attention_scores, axis=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs, training=training) context_layer = tf.matmul(attention_probs, value_layer) context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) context_layer = tf.reshape( context_layer, (batch_size, -1, self.all_head_size) ) # (batch_size, seq_len_q, all_head_size) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class TFLxmertIntermediate(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = get_tf_activation(config.hidden_act) else: self.intermediate_act_fn = config.hidden_act def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class TFLxmertOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class TFLxmertAttentionOutput(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob) def call(self, hidden_states, input_tensor, training=False): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class TFLxmertSelfAttentionLayer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.self = TFLxmertAttention(config, name="self") self.attention_output = TFLxmertAttentionOutput(config, name="output") def call(self, input_tensor, attention_mask, output_attentions, training=False): # Self attention attends to itself, thus keys and queries are the same (input_tensor). 
self_output = self.self(input_tensor, input_tensor, attention_mask, output_attentions) if output_attentions: attention_probs = self_output[1] attention_output = self.attention_output(self_output[0], input_tensor) return (attention_output, attention_probs) if output_attentions else (attention_output,) class TFLxmertCrossAttentionLayer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.att = TFLxmertAttention(config, name="att") self.attention_output = TFLxmertAttentionOutput(config, name="output") def call( self, input_tensor, ctx_tensor, ctx_att_mask, output_attentions=False, training=False, ): output = self.att(input_tensor, ctx_tensor, ctx_att_mask, output_attentions, training=training) if output_attentions: attention_probs = output[1] attention_output = self.attention_output(output[0], input_tensor, training=training) outputs = (attention_output, attention_probs) if output_attentions else (attention_output,) return outputs class TFLxmertLayer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.attention = TFLxmertSelfAttentionLayer(config, name="attention") self.intermediate = TFLxmertIntermediate(config, name="intermediate") self.transformer_output = TFLxmertOutput(config, name="output") def call(self, hidden_states, attention_mask, output_attentions, training=False): attention_outputs = self.attention(hidden_states, attention_mask, output_attentions, training=training) attention_output = attention_outputs[0] intermediate_output = self.intermediate(attention_output) layer_output = self.transformer_output(intermediate_output, attention_output, training=training) outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them return outputs class TFLxmertXLayer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.visual_attention = TFLxmertCrossAttentionLayer(config, name="visual_attention") # Self-attention Layers self.lang_self_att = TFLxmertSelfAttentionLayer(config, name="lang_self_att") self.visn_self_att = TFLxmertSelfAttentionLayer(config, name="visn_self_att") # Intermediate and Output Layers (FFNs) self.lang_inter = TFLxmertIntermediate(config, name="lang_inter") self.lang_output = TFLxmertOutput(config, name="lang_output") self.visn_inter = TFLxmertIntermediate(config, name="visn_inter") self.visn_output = TFLxmertOutput(config, name="visn_output") def cross_att( self, lang_input, lang_attention_mask, visn_input, visn_attention_mask, output_attentions, training=False, ): # Cross Attention # Keras saving and loading model *does not work* with the same inputs for two layers. 
lang_attention_lang_input = tf.identity(lang_input) visn_attention_lang_input = tf.identity(lang_input) lang_attention_visn_input = tf.identity(visn_input) visn_attention_visn_input = tf.identity(visn_input) lang_att_output = self.visual_attention( lang_attention_lang_input, lang_attention_visn_input, visn_attention_mask, output_attentions=output_attentions, training=training, ) visn_att_output = self.visual_attention( visn_attention_visn_input, visn_attention_lang_input, lang_attention_mask, output_attentions=output_attentions, training=training, ) return lang_att_output, visn_att_output def self_att( self, lang_input, lang_attention_mask, visn_input, visn_attention_mask, training=False, ): # Self Attention output_attentions = False lang_att_output = self.lang_self_att(lang_input, lang_attention_mask, output_attentions, training=training) visn_att_output = self.visn_self_att(visn_input, visn_attention_mask, output_attentions, training=training) return lang_att_output[0], visn_att_output[0] def output_fc(self, lang_input, visn_input, training=False): # FC layers lang_inter_output = self.lang_inter(lang_input) visn_inter_output = self.visn_inter(visn_input) # Layer output lang_output = self.lang_output(lang_inter_output, lang_input, training) visn_output = self.visn_output(visn_inter_output, visn_input, training) return lang_output, visn_output def call( self, lang_feats, lang_attention_mask, visn_feats, visn_attention_mask, output_attentions, training=False, ): lang_att_output = lang_feats visn_att_output = visn_feats lang_att_output, visn_att_output = self.cross_att( lang_att_output, lang_attention_mask, visn_att_output, visn_attention_mask, output_attentions, training=training, ) attention_probs = lang_att_output[1:] lang_att_output, visn_att_output = self.self_att( lang_att_output[0], lang_attention_mask, visn_att_output[0], visn_attention_mask, training=training, ) lang_output, visn_output = self.output_fc(lang_att_output, visn_att_output, training=training) return (lang_output, visn_output, attention_probs[0]) if output_attentions else (lang_output, visn_output) class TFLxmertEncoder(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.visn_fc = TFLxmertVisualFeatureEncoder(config, name="visn_fc") # Number of layers self.num_l_layers = config.l_layers self.num_x_layers = config.x_layers self.num_r_layers = config.r_layers # Layers # Using self.layer instead of self.l_layer to support loading BERT weights. 
self.layer = [TFLxmertLayer(config, name=f"layer_._{i}") for i in range(self.num_l_layers)] self.x_layers = [TFLxmertXLayer(config, name=f"x_layers_._{i}") for i in range(self.num_x_layers)] self.r_layers = [TFLxmertLayer(config, name=f"r_layers_._{i}") for i in range(self.num_r_layers)] self.config = config def call( self, lang_feats=None, lang_attention_mask=None, visual_feats=None, visual_pos=None, visual_attention_mask=None, output_attentions=None, training=False, ): vision_hidden_states = () language_hidden_states = () vision_attentions = () if output_attentions or self.config.output_attentions else None language_attentions = () if output_attentions or self.config.output_attentions else None cross_encoder_attentions = () if output_attentions or self.config.output_attentions else None visual_feats = self.visn_fc([visual_feats, visual_pos], training=training) # Run language layers for layer_module in self.layer: l_outputs = layer_module(lang_feats, lang_attention_mask, output_attentions, training=training) lang_feats = l_outputs[0] language_hidden_states = language_hidden_states + (lang_feats,) if language_attentions is not None: language_attentions = language_attentions + (l_outputs[1],) # Run relational layers for layer_module in self.r_layers: v_outputs = layer_module( visual_feats, visual_attention_mask, output_attentions, training=training, ) visual_feats = v_outputs[0] vision_hidden_states = vision_hidden_states + (visual_feats,) if vision_attentions is not None: vision_attentions = vision_attentions + (v_outputs[1],) # Run cross-modality layers for layer_module in self.x_layers: x_outputs = layer_module( lang_feats, lang_attention_mask, visual_feats, visual_attention_mask, output_attentions, training=training, ) lang_feats, visual_feats = x_outputs[:2] vision_hidden_states = vision_hidden_states + (visual_feats,) language_hidden_states = language_hidden_states + (lang_feats,) if cross_encoder_attentions is not None: cross_encoder_attentions = cross_encoder_attentions + (x_outputs[2],) visual_encoder_outputs = ( vision_hidden_states, vision_attentions if output_attentions else None, ) lang_encoder_outputs = ( language_hidden_states, language_attentions if output_attentions else None, ) return ( visual_encoder_outputs, lang_encoder_outputs, cross_encoder_attentions if output_attentions else None, ) @keras_serializable class TFLxmertMainLayer(tf.keras.layers.Layer): config_class = LxmertConfig @property def dummy_inputs(self): """ Dummy inputs to build the network. 
Returns: tf.Tensor with dummy inputs """ batch_size = 2 num_visual_features = 10 input_ids = tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32) visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim)) visual_pos = tf.random.uniform((batch_size, num_visual_features, 4)) return { "input_ids": input_ids, "visual_feats": visual_feats, "visual_pos": visual_pos, } def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.num_l_layers = config.l_layers self.num_x_layers = config.x_layers self.num_r_layers = config.r_layers self.initializer_range = config.initializer_range self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.return_dict = config.use_return_dict self.embeddings = TFLxmertEmbeddings(config, name="embeddings") self.encoder = TFLxmertEncoder(config, name="encoder") self.pooler = TFLxmertPooler(config, name="pooler") self.config = config def get_input_embeddings(self): return self.embeddings def set_input_embeddings(self, value): self.embeddings.weight = value self.embeddings.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): raise NotImplementedError @unpack_inputs def call( self, input_ids=None, visual_feats=None, visual_pos=None, attention_mask=None, visual_attention_mask=None, token_type_ids=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if visual_pos is None or visual_feats is None: raise ValueError("visual_feats and visual_pos cannot be `None` in LXMERT's `call` method.") if attention_mask is None: attention_mask = tf.fill(input_shape, 1) if token_type_ids is None: token_type_ids = tf.fill(input_shape, 0) # Positional Word Embeddings embedding_output = self.embeddings(input_ids, token_type_ids, inputs_embeds, training) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1])) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype) one_cst = tf.constant(1.0, dtype=embedding_output.dtype) ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype) extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst) if visual_attention_mask is not None: extended_visual_attention_mask = tf.reshape(visual_attention_mask, (input_shape[0], 1, 1, input_shape[1])) extended_visual_attention_mask = tf.expand_dims(tf.expand_dims(visual_attention_mask, axis=1), axis=1) extended_visual_attention_mask = tf.cast(extended_visual_attention_mask, dtype=embedding_output.dtype) extended_visual_attention_mask = tf.multiply( tf.subtract(one_cst, extended_visual_attention_mask), ten_thousand_cst ) else: extended_visual_attention_mask = None # Run Lxmert encoder encoder_outputs = self.encoder( embedding_output, extended_attention_mask, visual_feats, visual_pos, extended_visual_attention_mask, output_attentions, training, ) visual_encoder_outputs, lang_encoder_outputs = encoder_outputs[:2] vision_hidden_states = visual_encoder_outputs[0] language_hidden_states = lang_encoder_outputs[0] all_attentions = () if output_attentions: language_attentions = lang_encoder_outputs[1] vision_attentions = visual_encoder_outputs[1] cross_encoder_attentions = encoder_outputs[2] all_attentions = ( language_attentions, vision_attentions, cross_encoder_attentions, ) hidden_states = (language_hidden_states, vision_hidden_states) if output_hidden_states else () visual_output = vision_hidden_states[-1] lang_output = language_hidden_states[-1] pooled_output = self.pooler(lang_output) if not return_dict: return (lang_output, visual_output, pooled_output) + hidden_states + all_attentions return TFLxmertModelOutput( pooled_output=pooled_output, language_output=lang_output, vision_output=visual_output, language_hidden_states=language_hidden_states if output_hidden_states else None, vision_hidden_states=vision_hidden_states if output_hidden_states else None, language_attentions=language_attentions if output_attentions else None, vision_attentions=vision_attentions if output_attentions else None, cross_encoder_attentions=cross_encoder_attentions if output_attentions else None, ) class TFLxmertPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = LxmertConfig base_model_prefix = "lxmert" @property def dummy_inputs(self) -> Dict[str, tf.Tensor]: return getattr(self, self.base_model_prefix).dummy_inputs @tf.function( input_signature=[ { "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), "visual_feats": tf.TensorSpec((None, None, None), tf.float32, name="visual_feats"), "visual_pos": tf.TensorSpec((None, None, None), tf.float32, name="visual_pos"), "visual_attention_mask": tf.TensorSpec((None, None), tf.int32, name="visual_attention_mask"), "token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"), } ] ) def serving(self, inputs): output = self.call(inputs) return self.serving_output(output) LXMERT_START_DOCSTRING = r""" The LXMERT model was proposed in [LXMERT: Learning Cross-Modality Encoder Representations from Transformers](https://arxiv.org/abs/1908.07490) by Hao Tan and Mohit Bansal. 
It's a vision and language transformer model, pre-trained on a variety of multi-modal datasets comprising GQA, VQAv2.0, MS COCO captions, and Visual Genome, using a combination of masked language modeling, region of interest feature regression, cross entropy loss for question answering attribute prediction, and object tag prediction. This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Parameters: config ([`LxmertConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ LXMERT_INPUTS_DOCSTRING = r""" Args: input_ids (`np.ndarray` or `tf.Tensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) visual_feats: (`tf.Tensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`): This input represents visual features. They are ROI-pooled object features from bounding boxes using a faster-RCNN model. These are currently not provided by the transformers library. visual_pos: (`tf.Tensor` of shape `(batch_size, num_visual_features, visual_feat_dim)`): This input represents spatial features corresponding to their relative (via index) visual features. The pre-trained LXMERT model expects these spatial features to be normalized bounding boxes on a scale of 0 to 1. These are currently not provided by the transformers library. attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. 
Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) visual_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). 
""" @add_start_docstrings( "The bare Lxmert Model transformer outputting raw hidden-states without any specific head on top.", LXMERT_START_DOCSTRING, ) class TFLxmertModel(TFLxmertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.lxmert = TFLxmertMainLayer(config, name="lxmert") @unpack_inputs @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFLxmertModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: Optional[TFModelInputType] = None, visual_feats: Optional[tf.Tensor] = None, visual_pos: Optional[tf.Tensor] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, visual_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: bool = False, ) -> Union[Tuple, TFLxmertModelOutput]: outputs = self.lxmert( input_ids, visual_feats, visual_pos, attention_mask, visual_attention_mask, token_type_ids, inputs_embeds, output_attentions, output_hidden_states, return_dict, training, ) return outputs def serving_output(self, output): l_hs = tf.convert_to_tensor(output.language_hidden_states) if self.config.output_hidden_states else None v_hs = tf.convert_to_tensor(output.vision_hidden_states) if self.config.output_hidden_states else None l_attns = tf.convert_to_tensor(output.language_attentions) if self.config.output_attentions else None v_attns = tf.convert_to_tensor(output.vision_attentions) if self.config.output_attentions else None c_enc_attns = tf.convert_to_tensor(output.cross_encoder_attentions) if self.config.output_attentions else None return TFLxmertModelOutput( pooled_output=output.pooled_output, language_output=output.language_output, vision_output=output.vision_output, language_hidden_states=l_hs, vision_hidden_states=v_hs, language_attentions=l_attns, vision_attentions=v_attns, cross_encoder_attentions=c_enc_attns, ) class TFLxmertPooler(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), activation="tanh", name="dense", ) def call(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. 
first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) return pooled_output # Copied from transformers.models.bert.modeling_tf_bert.TFBertPredictionHeadTransform with Bert->Lxmert class TFLxmertPredictionHeadTransform(tf.keras.layers.Layer): def __init__(self, config: LxmertConfig, **kwargs): super().__init__(**kwargs) self.dense = tf.keras.layers.Dense( units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense", ) if isinstance(config.hidden_act, str): self.transform_act_fn = get_tf_activation(config.hidden_act) else: self.transform_act_fn = config.hidden_act self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm") def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.dense(inputs=hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(inputs=hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMPredictionHead with Bert->Lxmert class TFLxmertLMPredictionHead(tf.keras.layers.Layer): def __init__(self, config: LxmertConfig, input_embeddings: tf.keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.config = config self.hidden_size = config.hidden_size self.transform = TFLxmertPredictionHeadTransform(config, name="transform") # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.input_embeddings = input_embeddings def build(self, input_shape: tf.TensorShape): self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias") super().build(input_shape) def get_output_embeddings(self) -> tf.keras.layers.Layer: return self.input_embeddings def set_output_embeddings(self, value: tf.Variable): self.input_embeddings.weight = value self.input_embeddings.vocab_size = shape_list(value)[0] def get_bias(self) -> Dict[str, tf.Variable]: return {"bias": self.bias} def set_bias(self, value: tf.Variable): self.bias = value["bias"] self.config.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states: tf.Tensor) -> tf.Tensor: hidden_states = self.transform(hidden_states=hidden_states) seq_length = shape_list(hidden_states)[1] hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.hidden_size]) hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True) hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size]) hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias) return hidden_states # Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->Lxmert class TFLxmertMLMHead(tf.keras.layers.Layer): def __init__(self, config: LxmertConfig, input_embeddings: tf.keras.layers.Layer, **kwargs): super().__init__(**kwargs) self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions") def call(self, sequence_output: tf.Tensor) -> tf.Tensor: prediction_scores = self.predictions(hidden_states=sequence_output) return prediction_scores class TFLxmertPreTrainingHeads(tf.keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.predictions = TFLxmertLMPredictionHead(config, input_embeddings, name="predictions") self.seq_relationship = tf.keras.layers.Dense( 2, kernel_initializer=get_initializer(config.initializer_range), name="seq_relationship", ) def call(self, sequence_output, 
pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class TFLxmertVisualAnswerHead(tf.keras.layers.Layer): def __init__(self, config, num_labels, **kwargs): super().__init__(**kwargs) hid_dim = config.hidden_size self.dense = tf.keras.layers.Dense( hid_dim * 2, kernel_initializer=get_initializer(config.initializer_range), name="logit_fc_._0", ) self.activation = get_tf_activation("gelu") self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="logit_fc_._2") self.dense_1 = tf.keras.layers.Dense( num_labels, kernel_initializer=get_initializer(config.initializer_range), name="logit_fc_._3", ) def call(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.dense_1(hidden_states) return hidden_states class TFLxmertVisualObjHead(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.transform = TFLxmertPredictionHeadTransform(config, name="transform") # Decide the use of visual losses visual_losses = {} if config.visual_obj_loss: visual_losses["obj"] = {"shape": (-1,), "num": config.num_object_labels} if config.visual_attr_loss: visual_losses["attr"] = {"shape": (-1,), "num": config.num_attr_labels} if config.visual_feat_loss: visual_losses["feat"] = {"shape": (-1, 2048), "num": config.visual_feat_dim} self.visual_losses = visual_losses # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.decoder_dict = { key: tf.keras.layers.Dense( self.visual_losses[key]["num"], kernel_initializer=get_initializer(config.initializer_range), name=f"decoder_dict.{key}", ) for key in self.visual_losses } def call(self, hidden_states): hidden_states = self.transform(hidden_states) output = {} for key in self.visual_losses: output[key] = self.decoder_dict[key](hidden_states) return output @add_start_docstrings("""Lxmert Model with a `language modeling` head on top.""", LXMERT_START_DOCSTRING) class TFLxmertForPreTraining(TFLxmertPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.config = config self.num_qa_labels = config.num_qa_labels self.visual_loss_normalizer = config.visual_loss_normalizer # Use of pretraining tasks self.task_mask_lm = config.task_mask_lm self.task_obj_predict = config.task_obj_predict self.task_matched = config.task_matched self.task_qa = config.task_qa # Lxmert backbone self.lxmert = TFLxmertMainLayer(config, name="lxmert") # Pre-training heads self.cls = TFLxmertPreTrainingHeads(config, self.lxmert.embeddings, name="cls") if self.task_obj_predict: self.obj_predict_head = TFLxmertVisualObjHead(config, name="obj_predict_head") if self.task_qa: self.answer_head = TFLxmertVisualAnswerHead(config, self.num_qa_labels, name="answer_head") # Loss functions self.loss_fcts = { "l2": tf.keras.losses.Huber(delta=1.0, name="huber_loss"), "visn_ce": tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), "ce": tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), } visual_losses = {} if config.visual_obj_loss: visual_losses["obj"] = { "shape": (-1,), "num": config.num_object_labels, "loss": "visn_ce", } if config.visual_attr_loss: visual_losses["attr"] = { "shape": (-1,), "num": config.num_attr_labels, "loss": "visn_ce", } if 
config.visual_feat_loss: visual_losses["feat"] = { "shape": (-1, config.visual_feat_dim), "num": config.visual_feat_dim, "loss": "l2", } self.visual_losses = visual_losses @property def dummy_inputs(self): """ Dummy inputs to build the network. Returns: tf.Tensor with dummy inputs """ batch_size = 2 num_visual_features = 10 input_ids = tf.constant([[3, 5, 6], [2, 3, 4]], dtype=tf.int32) visual_feats = tf.random.uniform((batch_size, num_visual_features, self.config.visual_feat_dim)) visual_pos = tf.random.uniform((batch_size, num_visual_features, 4)) if self.config.task_obj_predict: obj_labels = {} if self.config.visual_attr_loss and self.config.task_obj_predict: obj_labels["attr"] = ( tf.ones([batch_size, num_visual_features]), tf.ones([batch_size, num_visual_features]), ) if self.config.visual_feat_loss and self.config.task_obj_predict: obj_labels["feat"] = ( tf.ones([batch_size, num_visual_features, self.config.visual_feat_dim]), tf.ones([batch_size, num_visual_features]), ) if self.config.visual_obj_loss and self.config.task_obj_predict: obj_labels["obj"] = ( tf.ones([batch_size, num_visual_features]), tf.ones([batch_size, num_visual_features]), ) return { **{ "input_ids": input_ids, "visual_feats": visual_feats, "visual_pos": visual_pos, }, **({"obj_labels": obj_labels} if self.config.task_obj_predict else {}), } def get_lm_head(self): return self.cls.predictions def get_prefix_bias_name(self): warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return self.name + "/" + self.cls.name + "/" + self.cls.predictions.name @unpack_inputs @add_start_docstrings_to_model_forward(LXMERT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFLxmertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids=None, visual_feats=None, visual_pos=None, attention_mask=None, visual_attention_mask=None, token_type_ids=None, inputs_embeds=None, masked_lm_labels=None, obj_labels=None, matched_label=None, ans=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, ): r""" masked_lm_labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]` obj_labels: (`Dict[Str: Tuple[tf.Tensor, tf.Tensor]]`, *optional*, defaults to `None`): each key is named after each one of the visual losses and each element of the tuple is of the shape `(batch_size, num_features)` and `(batch_size, num_features, visual_feature_dim)` for each the label id and the label score respectively matched_label (`tf.Tensor` of shape `(batch_size,)`, *optional*): Labels for computing the whether or not the text input matches the image (classification) loss. Input should be a sequence pair (see `input_ids` docstring) Indices should be in `[0, 1]`: - 0 indicates that the sentence does not match the image, - 1 indicates that the sentence does match the image. 
ans (`Torch.Tensor` of shape `(batch_size)`, *optional*, defaults to `None`): a one hot representation hof the correct answer *optional* Returns: """ lxmert_output = self.lxmert( input_ids, visual_feats, visual_pos, attention_mask, visual_attention_mask, token_type_ids, inputs_embeds, output_attentions, output_hidden_states, return_dict, training, ) lang_output, visual_output, pooled_output = ( lxmert_output[0], lxmert_output[1], lxmert_output[2], ) lang_prediction_scores, cross_relationship_score = self.cls(lang_output, pooled_output) if self.task_qa: answer_score = self.answer_head(pooled_output) else: answer_score = pooled_output[0][0] total_loss = ( None if (masked_lm_labels is None and matched_label is None and obj_labels is None and ans is None) else tf.constant(0.0) ) losses = () if masked_lm_labels is not None and self.task_mask_lm: masked_lm_loss = self.loss_fcts["ce"]( tf.reshape(masked_lm_labels, [-1]), tf.reshape(lang_prediction_scores, [-1, self.config.vocab_size]), ) total_loss += masked_lm_loss losses += (masked_lm_loss,) if matched_label is not None and self.task_matched: matched_loss = self.loss_fcts["ce"]( tf.reshape(matched_label, [-1]), tf.reshape(cross_relationship_score, [-1, 2]), ) total_loss += matched_loss losses += (matched_loss,) if obj_labels is not None and self.task_obj_predict: total_visn_loss = 0.0 visn_prediction_scores_dict = self.obj_predict_head(visual_output) for key, key_info in self.visual_losses.items(): label, mask_conf = obj_labels[key] output_dim = key_info["num"] loss_fct_name = key_info["loss"] label_shape = key_info["shape"] weight = self.visual_loss_normalizer visn_loss_fct = self.loss_fcts[loss_fct_name] visn_prediction_scores = visn_prediction_scores_dict[key] visn_loss = visn_loss_fct( tf.reshape(label, label_shape), tf.reshape(visn_prediction_scores, [-1, output_dim]), ) if visn_loss.ndim > 1: # Regression Losses visn_loss = tf.reduce_mean(visn_loss) visn_loss = tf.reduce_mean(visn_loss * tf.cast(tf.reshape(mask_conf, [-1]), visn_loss.dtype)) * weight total_visn_loss += visn_loss losses += (visn_loss,) total_loss += total_visn_loss if ans is not None and self.task_qa: answer_loss = self.loss_fcts["ce"]( tf.reshape(ans, [-1]), tf.reshape(answer_score, [-1, self.num_qa_labels]) ) # exclude "*2" here to match the effect of QA losses. # Previous: (loss *0) for 6 epochs, (loss *2) for 6 epochs. 
(Used 10 instead of 6 in EMNLP paper) # Now : (loss *1) for 12 epochs # # * 2 # Multiply by 2 because > half of the data will not have label total_loss += answer_loss losses += (answer_loss,) # return total_loss, tf.stack(losses)[tf.new_axis, ...], answer_score.detach() if not return_dict: output = ( lang_prediction_scores, cross_relationship_score, answer_score, ) + lxmert_output[3:] return ((total_loss,) + output) if total_loss is not None else output return TFLxmertForPreTrainingOutput( loss=total_loss, prediction_logits=lang_prediction_scores, cross_relationship_score=cross_relationship_score, question_answering_score=answer_score, language_hidden_states=lxmert_output.language_hidden_states, vision_hidden_states=lxmert_output.vision_hidden_states, language_attentions=lxmert_output.language_attentions, vision_attentions=lxmert_output.vision_attentions, cross_encoder_attentions=lxmert_output.cross_encoder_attentions, ) def serving_output(self, output): l_hs = tf.convert_to_tensor(output.language_hidden_states) if self.config.output_hidden_states else None v_hs = tf.convert_to_tensor(output.vision_hidden_states) if self.config.output_hidden_states else None l_attns = tf.convert_to_tensor(output.language_attentions) if self.config.output_attentions else None v_attns = tf.convert_to_tensor(output.vision_attentions) if self.config.output_attentions else None c_enc_attns = tf.convert_to_tensor(output.cross_encoder_attentions) if self.config.output_attentions else None return TFLxmertForPreTrainingOutput( prediction_logits=output.prediction_logits, cross_relationship_score=output.cross_relationship_score, question_answering_score=output.question_answering_score, language_hidden_states=l_hs, vision_hidden_states=v_hs, language_attentions=l_attns, vision_attentions=v_attns, cross_encoder_attentions=c_enc_attns, )
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
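For illustration only (not part of the PR or of the file below): a minimal sketch of the substitution the description refers to. The variable names and values (`seq_len`, `block_size`) are made up; `torch.div(..., rounding_mode="floor")` reproduces the old tensor `//` semantics without the deprecation warning.

```python
import torch

seq_len = torch.tensor([17, 32, 9])  # illustrative lengths, not from the PR
block_size = 8

# Deprecated pattern that triggers the __floordiv__ warning:
#     num_blocks = seq_len // block_size

# Explicit floor division gives the same result without the warning.
num_blocks = torch.div(seq_len, block_size, rounding_mode="floor")
print(num_blocks)  # tensor([2, 4, 1])
```

Note that `rounding_mode="floor"` rounds toward negative infinity like Python's `//`, whereas `rounding_mode="trunc"` rounds toward zero; the floor variant is the drop-in replacement for the previous tensor `//` behaviour.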
./src/transformers/models/splinter/modeling_splinter.py
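Aside (not part of the source file): a standalone sketch of the position-gathering trick used by `_prepare_question_positions` near the end of the file below. The token ids are invented for illustration (`104` stands in for the question token id, `0` for the pad token id).

```python
import torch

question_token_id, pad_token_id = 104, 0  # hypothetical ids, for illustration only

input_ids = torch.tensor(
    [
        [101, 104, 7592, 104, 102],  # two question tokens, at positions 1 and 3
        [101, 2023, 104, 102, 0],    # one question token, at position 2
    ]
)

# Locate every question token, then count how many each row has.
rows, flat_positions = torch.where(input_ids == question_token_id)
num_questions = torch.bincount(rows)

# Right-pad rows with fewer question tokens using the pad token id.
positions = torch.full(
    (input_ids.size(0), int(num_questions.max())),
    pad_token_id,
    dtype=torch.long,
)
cols = torch.cat([torch.arange(int(n)) for n in num_questions])
positions[rows, cols] = flat_positions
print(positions)  # tensor([[1, 3], [2, 0]])
```

Rows with fewer question tokens than the maximum end up padded with the pad token id, which the pretraining loss in the file below then ignores via `ignore_index`.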
# coding=utf-8 # Copyright 2021 Tel AViv University, AllenAI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Splinter model.""" import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, ModelOutput, QuestionAnsweringModelOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_splinter import SplinterConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "tau/splinter-base" _CONFIG_FOR_DOC = "SplinterConfig" SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "tau/splinter-base", "tau/splinter-base-qass", "tau/splinter-large", "tau/splinter-large-qass", # See all Splinter models at https://huggingface.co/models?filter=splinter ] class SplinterEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") def forward( self, input_ids: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, past_key_values_length: Optional[int] = 0, ) -> Tuple: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type 
== "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Splinter class SplinterSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. 
# Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": query_length, key_length = query_layer.shape[2], key_layer.shape[2] if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( -1, 1 ) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in SplinterModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Splinter class SplinterSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Splinter class SplinterAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = SplinterSelfAttention(config, position_embedding_type=position_embedding_type) self.output = SplinterSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Splinter class SplinterIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return 
hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Splinter class SplinterOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Splinter class SplinterLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = SplinterAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = SplinterAttention(config, position_embedding_type="absolute") self.intermediate = SplinterIntermediate(config) self.output = SplinterOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if 
decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Splinter class SplinterEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([SplinterLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) class SplinterPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = SplinterConfig base_model_prefix = "splinter" supports_gradient_checkpointing = True _keys_to_ignore_on_load_missing = [r"position_ids"] # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, SplinterEncoder): module.gradient_checkpointing = value SPLINTER_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`SplinterConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ SPLINTER_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `{0}`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `{0}`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `{0}`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare Splinter Model transformer outputting raw hidden-states without any specific head on top.", SPLINTER_START_DOCSTRING, ) class SplinterModel(SplinterPreTrainedModel): """ The model is an encoder (with only self-attention) following the architecture described in [Attention is all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. """ def __init__(self, config): super().__init__(config) self.config = config self.embeddings = SplinterEmbeddings(config) self.encoder = SplinterEncoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(SPLINTER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=sequence_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class SplinterFullyConnectedLayer(nn.Module): def __init__(self, input_dim, output_dim, hidden_act="gelu"): super().__init__() self.input_dim = input_dim self.output_dim = output_dim self.dense = nn.Linear(self.input_dim, self.output_dim) self.act_fn = ACT2FN[hidden_act] self.LayerNorm = nn.LayerNorm(self.output_dim) def forward(self, inputs: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(inputs) hidden_states = self.act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class QuestionAwareSpanSelectionHead(nn.Module): """ Implementation of Question-Aware Span Selection (QASS) head, described in Splinter's paper: """ def __init__(self, config): super().__init__() self.query_start_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size) self.query_end_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size) self.start_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size) self.end_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size) self.start_classifier = nn.Linear(config.hidden_size, config.hidden_size, bias=False) self.end_classifier = nn.Linear(config.hidden_size, config.hidden_size, bias=False) def forward(self, inputs, positions): _, _, dim = inputs.size() index = positions.unsqueeze(-1).repeat(1, 1, dim) # [batch_size, num_positions, dim] gathered_reps = torch.gather(inputs, dim=1, index=index) # [batch_size, num_positions, dim] query_start_reps = 
self.query_start_transform(gathered_reps) # [batch_size, num_positions, dim] query_end_reps = self.query_end_transform(gathered_reps) # [batch_size, num_positions, dim] start_reps = self.start_transform(inputs) # [batch_size, seq_length, dim] end_reps = self.end_transform(inputs) # [batch_size, seq_length, dim] hidden_states = self.start_classifier(query_start_reps) # [batch_size, num_positions, dim] start_reps = start_reps.permute(0, 2, 1) # [batch_size, dim, seq_length] start_logits = torch.matmul(hidden_states, start_reps) hidden_states = self.end_classifier(query_end_reps) end_reps = end_reps.permute(0, 2, 1) end_logits = torch.matmul(hidden_states, end_reps) return start_logits, end_logits @add_start_docstrings( """ Splinter Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, SPLINTER_START_DOCSTRING, ) class SplinterForQuestionAnswering(SplinterPreTrainedModel): def __init__(self, config): super().__init__(config) self.splinter = SplinterModel(config) self.splinter_qass = QuestionAwareSpanSelectionHead(config) self.question_token_id = config.question_token_id # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SPLINTER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, question_positions: Optional[torch.LongTensor] = None, ) -> Union[Tuple, QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. question_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*): The positions of all question tokens. If given, start_logits and end_logits will be of shape `(batch_size, num_questions, sequence_length)`. If None, the first question token in each sequence in the batch will be the only one for which start_logits and end_logits are calculated and they will be of shape `(batch_size, sequence_length)`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict question_positions_were_none = False if question_positions is None: if input_ids is not None: question_position_for_each_example = torch.argmax( (torch.eq(input_ids, self.question_token_id)).int(), dim=-1 ) else: question_position_for_each_example = torch.zeros( inputs_embeds.size(0), dtype=torch.long, layout=inputs_embeds.layout, device=inputs_embeds.device ) question_positions = question_position_for_each_example.unsqueeze(-1) question_positions_were_none = True outputs = self.splinter( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] start_logits, end_logits = self.splinter_qass(sequence_output, question_positions) if question_positions_were_none: start_logits, end_logits = start_logits.squeeze(1), end_logits.squeeze(1) if attention_mask is not None: start_logits = start_logits + (1 - attention_mask) * torch.finfo(start_logits.dtype).min end_logits = end_logits + (1 - attention_mask) * torch.finfo(end_logits.dtype).min total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @dataclass class SplinterForPreTrainingOutput(ModelOutput): """ Class for outputs of Splinter as a span selection model. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when start and end positions are provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (`torch.FloatTensor` of shape `(batch_size, num_questions, sequence_length)`): Span-start scores (before SoftMax). end_logits (`torch.FloatTensor` of shape `(batch_size, num_questions, sequence_length)`): Span-end scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None start_logits: torch.FloatTensor = None end_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @add_start_docstrings( """ Splinter Model for the recurring span selection task as done during the pretraining. The difference to the QA task is that we do not have a question, but multiple question tokens that replace the occurrences of recurring spans instead. """, SPLINTER_START_DOCSTRING, ) class SplinterForPreTraining(SplinterPreTrainedModel): def __init__(self, config): super().__init__(config) self.splinter = SplinterModel(config) self.splinter_qass = QuestionAwareSpanSelectionHead(config) self.question_token_id = config.question_token_id # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward( SPLINTER_INPUTS_DOCSTRING.format("batch_size, num_questions, sequence_length") ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, question_positions: Optional[torch.LongTensor] = None, ) -> Union[Tuple, SplinterForPreTrainingOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. question_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*): The positions of all question tokens. If given, start_logits and end_logits will be of shape `(batch_size, num_questions, sequence_length)`. If None, the first question token in each sequence in the batch will be the only one for which start_logits and end_logits are calculated and they will be of shape `(batch_size, sequence_length)`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if question_positions is None and start_positions is not None and end_positions is not None: raise TypeError("question_positions must be specified in order to calculate the loss") elif question_positions is None and input_ids is None: raise TypeError("question_positions must be specified when input_embeds is used") elif question_positions is None: question_positions = self._prepare_question_positions(input_ids) outputs = self.splinter( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] batch_size, sequence_length, dim = sequence_output.size() # [batch_size, num_questions, sequence_length] start_logits, end_logits = self.splinter_qass(sequence_output, question_positions) num_questions = question_positions.size(1) if attention_mask is not None: attention_mask_for_each_question = attention_mask.unsqueeze(1).expand( batch_size, num_questions, sequence_length ) start_logits = start_logits + (1 - attention_mask_for_each_question) * torch.finfo(start_logits.dtype).min end_logits = end_logits + (1 - attention_mask_for_each_question) * torch.finfo(end_logits.dtype).min total_loss = None # [batch_size, num_questions, sequence_length] if start_positions is not None and end_positions is not None: # sometimes the start/end positions are outside our model inputs, we ignore these terms start_positions.clamp_(0, max(0, sequence_length - 1)) end_positions.clamp_(0, max(0, sequence_length - 1)) # Ignore zero positions in the loss. Splinter never predicts zero # during pretraining and zero is used for padding question # tokens as well as for start and end positions of padded # question tokens. loss_fct = CrossEntropyLoss(ignore_index=self.config.pad_token_id) start_loss = loss_fct( start_logits.view(batch_size * num_questions, sequence_length), start_positions.view(batch_size * num_questions), ) end_loss = loss_fct( end_logits.view(batch_size * num_questions, sequence_length), end_positions.view(batch_size * num_questions), ) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return SplinterForPreTrainingOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def _prepare_question_positions(self, input_ids: torch.Tensor) -> torch.Tensor: rows, flat_positions = torch.where(input_ids == self.config.question_token_id) num_questions = torch.bincount(rows) positions = torch.full( (input_ids.size(0), num_questions.max()), self.config.pad_token_id, dtype=torch.long, device=input_ids.device, ) cols = torch.cat([torch.arange(n) for n in num_questions]) positions[rows, cols] = flat_positions return positions
# coding=utf-8 # Copyright 2021 Tel AViv University, AllenAI and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Splinter model.""" import math from dataclasses import dataclass from typing import List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, ModelOutput, QuestionAnsweringModelOutput from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_splinter import SplinterConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "tau/splinter-base" _CONFIG_FOR_DOC = "SplinterConfig" SPLINTER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "tau/splinter-base", "tau/splinter-base-qass", "tau/splinter-large", "tau/splinter-large-qass", # See all Splinter models at https://huggingface.co/models?filter=splinter ] class SplinterEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") def forward( self, input_ids: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, past_key_values_length: Optional[int] = 0, ) -> Tuple: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type 
== "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Splinter class SplinterSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. 
# Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": query_length, key_length = query_layer.shape[2], key_layer.shape[2] if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( -1, 1 ) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in SplinterModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->Splinter class SplinterSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Splinter class SplinterAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = SplinterSelfAttention(config, position_embedding_type=position_embedding_type) self.output = SplinterSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->Splinter class SplinterIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return 
hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Splinter class SplinterOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Splinter class SplinterLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = SplinterAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = SplinterAttention(config, position_embedding_type="absolute") self.intermediate = SplinterIntermediate(config) self.output = SplinterOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if 
decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Splinter class SplinterEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([SplinterLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) class SplinterPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = SplinterConfig base_model_prefix = "splinter" supports_gradient_checkpointing = True _keys_to_ignore_on_load_missing = [r"position_ids"] # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights def _init_weights(self, module): """Initialize the weights""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, SplinterEncoder): module.gradient_checkpointing = value SPLINTER_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`SplinterConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ SPLINTER_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `{0}`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `{0}`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `{0}`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert *input_ids* indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. 
See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare Splinter Model transformer outputting raw hidden-states without any specific head on top.", SPLINTER_START_DOCSTRING, ) class SplinterModel(SplinterPreTrainedModel): """ The model is an encoder (with only self-attention) following the architecture described in [Attention is all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. """ def __init__(self, config): super().__init__(config) self.config = config self.embeddings = SplinterEmbeddings(config) self.encoder = SplinterEncoder(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(SPLINTER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. 
If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] if not return_dict: return (sequence_output,) + encoder_outputs[1:] return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=sequence_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) class SplinterFullyConnectedLayer(nn.Module): def __init__(self, input_dim, output_dim, hidden_act="gelu"): super().__init__() self.input_dim = input_dim self.output_dim = output_dim self.dense = nn.Linear(self.input_dim, self.output_dim) self.act_fn = ACT2FN[hidden_act] self.LayerNorm = nn.LayerNorm(self.output_dim) def forward(self, inputs: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(inputs) hidden_states = self.act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class QuestionAwareSpanSelectionHead(nn.Module): """ Implementation of Question-Aware Span Selection (QASS) head, described in Splinter's paper: """ def __init__(self, config): super().__init__() self.query_start_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size) self.query_end_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size) self.start_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size) self.end_transform = SplinterFullyConnectedLayer(config.hidden_size, config.hidden_size) self.start_classifier = nn.Linear(config.hidden_size, config.hidden_size, bias=False) self.end_classifier = nn.Linear(config.hidden_size, config.hidden_size, bias=False) def forward(self, inputs, positions): _, _, dim = inputs.size() index = positions.unsqueeze(-1).repeat(1, 1, dim) # [batch_size, num_positions, dim] gathered_reps = torch.gather(inputs, dim=1, index=index) # [batch_size, num_positions, dim] query_start_reps = 
self.query_start_transform(gathered_reps) # [batch_size, num_positions, dim] query_end_reps = self.query_end_transform(gathered_reps) # [batch_size, num_positions, dim] start_reps = self.start_transform(inputs) # [batch_size, seq_length, dim] end_reps = self.end_transform(inputs) # [batch_size, seq_length, dim] hidden_states = self.start_classifier(query_start_reps) # [batch_size, num_positions, dim] start_reps = start_reps.permute(0, 2, 1) # [batch_size, dim, seq_length] start_logits = torch.matmul(hidden_states, start_reps) hidden_states = self.end_classifier(query_end_reps) end_reps = end_reps.permute(0, 2, 1) end_logits = torch.matmul(hidden_states, end_reps) return start_logits, end_logits @add_start_docstrings( """ Splinter Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, SPLINTER_START_DOCSTRING, ) class SplinterForQuestionAnswering(SplinterPreTrainedModel): def __init__(self, config): super().__init__(config) self.splinter = SplinterModel(config) self.splinter_qass = QuestionAwareSpanSelectionHead(config) self.question_token_id = config.question_token_id # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(SPLINTER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, question_positions: Optional[torch.LongTensor] = None, ) -> Union[Tuple, QuestionAnsweringModelOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. question_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*): The positions of all question tokens. If given, start_logits and end_logits will be of shape `(batch_size, num_questions, sequence_length)`. If None, the first question token in each sequence in the batch will be the only one for which start_logits and end_logits are calculated and they will be of shape `(batch_size, sequence_length)`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict question_positions_were_none = False if question_positions is None: if input_ids is not None: question_position_for_each_example = torch.argmax( (torch.eq(input_ids, self.question_token_id)).int(), dim=-1 ) else: question_position_for_each_example = torch.zeros( inputs_embeds.size(0), dtype=torch.long, layout=inputs_embeds.layout, device=inputs_embeds.device ) question_positions = question_position_for_each_example.unsqueeze(-1) question_positions_were_none = True outputs = self.splinter( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] start_logits, end_logits = self.splinter_qass(sequence_output, question_positions) if question_positions_were_none: start_logits, end_logits = start_logits.squeeze(1), end_logits.squeeze(1) if attention_mask is not None: start_logits = start_logits + (1 - attention_mask) * torch.finfo(start_logits.dtype).min end_logits = end_logits + (1 - attention_mask) * torch.finfo(end_logits.dtype).min total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @dataclass class SplinterForPreTrainingOutput(ModelOutput): """ Class for outputs of Splinter as a span selection model. Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when start and end positions are provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (`torch.FloatTensor` of shape `(batch_size, num_questions, sequence_length)`): Span-start scores (before SoftMax). end_logits (`torch.FloatTensor` of shape `(batch_size, num_questions, sequence_length)`): Span-end scores (before SoftMax). hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the optional initial embedding outputs. attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None start_logits: torch.FloatTensor = None end_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @add_start_docstrings( """ Splinter Model for the recurring span selection task as done during the pretraining. The difference to the QA task is that we do not have a question, but multiple question tokens that replace the occurrences of recurring spans instead. """, SPLINTER_START_DOCSTRING, ) class SplinterForPreTraining(SplinterPreTrainedModel): def __init__(self, config): super().__init__(config) self.splinter = SplinterModel(config) self.splinter_qass = QuestionAwareSpanSelectionHead(config) self.question_token_id = config.question_token_id # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward( SPLINTER_INPUTS_DOCSTRING.format("batch_size, num_questions, sequence_length") ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, start_positions: Optional[torch.LongTensor] = None, end_positions: Optional[torch.LongTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, question_positions: Optional[torch.LongTensor] = None, ) -> Union[Tuple, SplinterForPreTrainingOutput]: r""" start_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. question_positions (`torch.LongTensor` of shape `(batch_size, num_questions)`, *optional*): The positions of all question tokens. If given, start_logits and end_logits will be of shape `(batch_size, num_questions, sequence_length)`. If None, the first question token in each sequence in the batch will be the only one for which start_logits and end_logits are calculated and they will be of shape `(batch_size, sequence_length)`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if question_positions is None and start_positions is not None and end_positions is not None: raise TypeError("question_positions must be specified in order to calculate the loss") elif question_positions is None and input_ids is None: raise TypeError("question_positions must be specified when input_embeds is used") elif question_positions is None: question_positions = self._prepare_question_positions(input_ids) outputs = self.splinter( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] batch_size, sequence_length, dim = sequence_output.size() # [batch_size, num_questions, sequence_length] start_logits, end_logits = self.splinter_qass(sequence_output, question_positions) num_questions = question_positions.size(1) if attention_mask is not None: attention_mask_for_each_question = attention_mask.unsqueeze(1).expand( batch_size, num_questions, sequence_length ) start_logits = start_logits + (1 - attention_mask_for_each_question) * torch.finfo(start_logits.dtype).min end_logits = end_logits + (1 - attention_mask_for_each_question) * torch.finfo(end_logits.dtype).min total_loss = None # [batch_size, num_questions, sequence_length] if start_positions is not None and end_positions is not None: # sometimes the start/end positions are outside our model inputs, we ignore these terms start_positions.clamp_(0, max(0, sequence_length - 1)) end_positions.clamp_(0, max(0, sequence_length - 1)) # Ignore zero positions in the loss. Splinter never predicts zero # during pretraining and zero is used for padding question # tokens as well as for start and end positions of padded # question tokens. loss_fct = CrossEntropyLoss(ignore_index=self.config.pad_token_id) start_loss = loss_fct( start_logits.view(batch_size * num_questions, sequence_length), start_positions.view(batch_size * num_questions), ) end_loss = loss_fct( end_logits.view(batch_size * num_questions, sequence_length), end_positions.view(batch_size * num_questions), ) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return SplinterForPreTrainingOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def _prepare_question_positions(self, input_ids: torch.Tensor) -> torch.Tensor: rows, flat_positions = torch.where(input_ids == self.config.question_token_id) num_questions = torch.bincount(rows) positions = torch.full( (input_ids.size(0), num_questions.max()), self.config.pad_token_id, dtype=torch.long, device=input_ids.device, ) cols = torch.cat([torch.arange(n) for n in num_questions]) positions[rows, cols] = flat_positions return positions
-1
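The `SplinterForQuestionAnswering` code above locates the `[QUESTION]` token itself when `question_positions` is not supplied and returns start/end logits of shape `(batch_size, sequence_length)`. A minimal usage sketch follows; the checkpoint name is an assumed public Splinter QA checkpoint and the question/context strings are illustrative, so treat this as a sketch rather than the canonical example.

```python
import torch
from transformers import AutoTokenizer, SplinterForQuestionAnswering

# Assumed public Splinter QA checkpoint; any Splinter checkpoint with a QASS head is used the same way.
checkpoint = "tau/splinter-base-qa"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = SplinterForQuestionAnswering.from_pretrained(checkpoint)

question = "Who wrote the report?"
context = "The report was written by Ada Lovelace in 1843."
# The Splinter tokenizer inserts the special [QUESTION] token between question and context.
inputs = tokenizer(question, context, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# question_positions was left as None, so the logits have shape (batch_size, sequence_length).
start = int(outputs.start_logits.argmax(-1))
end = int(outputs.end_logits.argmax(-1))
print(tokenizer.decode(inputs["input_ids"][0, start : end + 1]))
```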
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/utils/dummy_vision_objects.py
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class ImageProcessingMixin(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ImageFeatureExtractionMixin(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BeitFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BeitImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BitImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BlipImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BridgeTowerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ChineseCLIPFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ChineseCLIPImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class CLIPFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class CLIPImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ConditionalDetrFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ConditionalDetrImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ConvNextFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ConvNextImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DeformableDetrFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DeformableDetrImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DeiTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DeiTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DetaImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DetrFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DetrImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DonutFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, 
["vision"]) class DonutImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DPTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DPTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class EfficientFormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class EfficientNetImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FlavaFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FlavaImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FlavaProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class GLPNFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class GLPNImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ImageGPTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ImageGPTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LayoutLMv2FeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LayoutLMv2ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LayoutLMv3FeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LayoutLMv3ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LevitFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LevitImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class Mask2FormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MaskFormerFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MaskFormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileNetV1FeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileNetV1ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileNetV2FeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, 
**kwargs): requires_backends(self, ["vision"]) class MobileNetV2ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileViTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileViTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class OneFormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class OwlViTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class OwlViTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PerceiverFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PerceiverImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PoolFormerFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PoolFormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SegformerFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SegformerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class Swin2SRImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class TvltImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class VideoMAEFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class VideoMAEImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViltFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViltImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViltProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViTHybridImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class YolosFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class YolosImageProcessor(metaclass=DummyObject): _backends = ["vision"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["vision"])
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class ImageProcessingMixin(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ImageFeatureExtractionMixin(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BeitFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BeitImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BitImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BlipImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class BridgeTowerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ChineseCLIPFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ChineseCLIPImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class CLIPFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class CLIPImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ConditionalDetrFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ConditionalDetrImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ConvNextFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ConvNextImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DeformableDetrFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DeformableDetrImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DeiTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DeiTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DetaImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DetrFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DetrImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DonutFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, 
["vision"]) class DonutImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DPTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class DPTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class EfficientFormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class EfficientNetImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FlavaFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FlavaImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class FlavaProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class GLPNFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class GLPNImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ImageGPTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ImageGPTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LayoutLMv2FeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LayoutLMv2ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LayoutLMv3FeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LayoutLMv3ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LevitFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class LevitImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class Mask2FormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MaskFormerFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MaskFormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileNetV1FeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileNetV1ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileNetV2FeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, 
**kwargs): requires_backends(self, ["vision"]) class MobileNetV2ImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileViTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class MobileViTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class OneFormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class OwlViTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class OwlViTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PerceiverFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PerceiverImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PoolFormerFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class PoolFormerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SegformerFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class SegformerImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class Swin2SRImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class TvltImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class VideoMAEFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class VideoMAEImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViltFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViltImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViltProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViTFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViTImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class ViTHybridImageProcessor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class YolosFeatureExtractor(metaclass=DummyObject): _backends = ["vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["vision"]) class YolosImageProcessor(metaclass=DummyObject): _backends = ["vision"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["vision"])
-1
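The `dummy_vision_objects.py` content above repeats a single pattern: every vision-dependent class is replaced by a placeholder that raises an informative error when the `vision` backend is not installed. A stripped-down sketch of that pattern is shown below; it is a simplified stand-in, not the exact `DummyObject`/`requires_backends` implementation from `transformers.utils`.

```python
def requires_backends(obj, backends):
    # Simplified: the real helper first checks whether the required backends are importable.
    name = getattr(obj, "__name__", obj.__class__.__name__)
    raise ImportError(f"{name} requires the {backends} backend(s) to be installed.")


class DummyObject(type):
    """Metaclass: accessing any public attribute of the class raises the backend error."""

    def __getattribute__(cls, key):
        if key.startswith("_"):
            return super().__getattribute__(key)
        requires_backends(cls, cls._backends)


class ViTImageProcessor(metaclass=DummyObject):
    _backends = ["vision"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])


try:
    ViTImageProcessor()  # or ViTImageProcessor.from_pretrained(...)
except ImportError as err:
    print(err)
```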
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/wavlm/__init__.py
-1
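The change this PR describes is mechanical: wherever a tensor is floor-divided with `//`, the expression becomes an explicit `torch.div(..., rounding_mode="floor")`. A minimal before/after sketch, with illustrative tensor names, is shown below.

```python
import torch

positions = torch.arange(12)  # e.g. token positions
block_size = 5

# Old style: emits "__floordiv__ is deprecated ..." on the PyTorch versions this PR targets.
block_ids_old = positions // block_size

# New style: explicit floor-rounding division, warning-free.
block_ids_new = torch.div(positions, block_size, rounding_mode="floor")

assert torch.equal(block_ids_old, block_ids_new)
```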
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/vision_text_dual_encoder/test_modeling_vision_text_dual_encoder.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch VisionTextDualEncoder model. """ import collections import tempfile import unittest import numpy as np from transformers.testing_utils import is_pt_flax_cross_test, require_torch, require_vision, slow, torch_device from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_bert import BertModelTester from ..clip.test_modeling_clip import CLIPVisionModelTester from ..deit.test_modeling_deit import DeiTModelTester from ..roberta.test_modeling_roberta import RobertaModelTester from ..vit.test_modeling_vit import ViTModelTester if is_torch_available(): import torch from transformers import ( BertModel, CLIPVisionModel, DeiTModel, RobertaModel, VisionTextDualEncoderConfig, VisionTextDualEncoderModel, ViTModel, ) if is_flax_available(): from transformers import FlaxVisionTextDualEncoderModel from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor # Inspired by # https://github.com/rwightman/pytorch-image-models/blob/b9bd960a032c75ca6b808ddeed76bee5f3ed4972/timm/models/layers/helpers.py # From PyTorch internals def to_2tuple(x): if isinstance(x, collections.abc.Iterable): return x return (x, x) @require_torch class VisionTextDualEncoderMixin: def get_vision_text_model(self, config, text_config): pass def prepare_config_and_inputs(self): pass def get_pretrained_model_and_inputs(self): pass def check_model_from_pretrained_configs( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config) model = VisionTextDualEncoderModel(config) model.to(torch_device) model.eval() output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim)) self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim)) def check_vision_text_dual_encoder_model( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) model = VisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model) model.to(torch_device) model.eval() output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim)) def check_vision_text_dual_encoder_from_pretrained( self, 
text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) kwargs = {"vision_model": vision_model, "text_model": text_model} model = VisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs) model.to(torch_device) model.eval() output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim)) def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) model = VisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model) model.to(torch_device) model.eval() with torch.no_grad(): output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) out_1 = output[0].cpu().numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = VisionTextDualEncoderModel.from_pretrained(tmpdirname).eval() model.to(torch_device) after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) out_2 = after_output[0].cpu().numpy() max_diff = np.amax(np.abs(out_2 - out_1)) self.assertLessEqual(max_diff, 1e-5) def check_vision_text_output_attention( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) model = VisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model) model.to(torch_device) model.eval() output = model( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True ) vision_attentions = output.vision_model_output.attentions self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) image_size = to_2tuple(vision_model.config.image_size) patch_size = to_2tuple(vision_model.config.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len)) text_attentions = output.text_model_output.attentions self.assertEqual(len(text_attentions), text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), ) def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") def check_pt_flax_equivalence(self, pt_model, fx_model, input_ids, attention_mask, pixel_values, **kwargs): pt_model.to(torch_device) pt_model.eval() # prepare inputs inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values} pt_inputs = inputs_dict flax_inputs = {k: v.numpy() for k, v in pt_inputs.items()} with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**flax_inputs).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs[:4], 
pt_outputs[:4]): self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**flax_inputs).to_tuple() self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]): self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True) pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]): self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2) def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict): config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config) pt_model = VisionTextDualEncoderModel(config) fx_model = FlaxVisionTextDualEncoderModel(config) fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state self.check_pt_flax_equivalence(pt_model, fx_model, **inputs_dict) def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict): config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config) pt_model = VisionTextDualEncoderModel(config) fx_model = FlaxVisionTextDualEncoderModel(config) pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) self.check_pt_flax_equivalence(pt_model, fx_model, **inputs_dict) def test_vision_text_dual_encoder_model(self): inputs_dict = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**inputs_dict) def test_model_from_pretrained_configs(self): inputs_dict = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**inputs_dict) def test_vision_text_dual_encoder_from_pretrained(self): inputs_dict = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict) def test_save_load(self): inputs_dict = self.prepare_config_and_inputs() self.check_save_load(**inputs_dict) def test_vision_text_output_attention(self): inputs_dict = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**inputs_dict) @is_pt_flax_cross_test def test_pt_flax_equivalence(self): config_inputs_dict = self.prepare_config_and_inputs() vision_config = config_inputs_dict.pop("vision_config") text_config = config_inputs_dict.pop("text_config") inputs_dict = config_inputs_dict self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict) self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict) @slow def test_real_model_save_load_from_pretrained(self): model_2, inputs = self.get_pretrained_model_and_inputs() model_2.to(torch_device) with torch.no_grad(): outputs = model_2(**inputs) out_2 = outputs[0].cpu().numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_2.save_pretrained(tmp_dirname) model_1 = VisionTextDualEncoderModel.from_pretrained(tmp_dirname) model_1.to(torch_device) 
after_outputs = model_1(**inputs) out_1 = after_outputs[0].cpu().numpy() max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) @require_torch class ViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = VisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-bert" ) batch_size = 13 pixel_values = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size) attention_mask = random_attention_mask([batch_size, 4]) inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def get_vision_text_model(self, vision_config, text_config): vision_model = ViTModel(vision_config).eval() text_model = BertModel(text_config).eval() return vision_model, text_model def prepare_config_and_inputs(self): vit_model_tester = ViTModelTester(self) bert_model_tester = BertModelTester(self) vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs() text_config_and_inputs = bert_model_tester.prepare_config_and_inputs() vision_config, pixel_values, _ = vision_config_and_inputs ( text_config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_torch class DeiTRobertaModelTest(VisionTextDualEncoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = VisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-deit", "hf-internal-testing/tiny-random-roberta" ) batch_size = 13 pixel_values = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size) attention_mask = random_attention_mask([batch_size, 4]) inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def check_vision_text_output_attention( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) model = VisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model) model.to(torch_device) model.eval() output = model( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True ) vision_attentions = output.vision_model_output.attentions self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) image_size = to_2tuple(vision_model.config.image_size) patch_size = to_2tuple(vision_model.config.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len)) text_attentions = 
output.text_model_output.attentions self.assertEqual(len(text_attentions), text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), ) def get_vision_text_model(self, vision_config, text_config): vision_model = DeiTModel(vision_config).eval() text_model = RobertaModel(text_config).eval() return vision_model, text_model def prepare_config_and_inputs(self): vit_model_tester = DeiTModelTester(self) bert_model_tester = RobertaModelTester(self) vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs() text_config_and_inputs = bert_model_tester.prepare_config_and_inputs() vision_config, pixel_values, _ = vision_config_and_inputs ( text_config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } # skip as DeiT is not available in Flax def test_pt_flax_equivalence(self): pass @require_torch class CLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = VisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-clip", "hf-internal-testing/tiny-bert" ) batch_size = 13 pixel_values = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size) attention_mask = random_attention_mask([batch_size, 4]) inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def get_vision_text_model(self, vision_config, text_config): vision_model = CLIPVisionModel(vision_config).eval() text_model = BertModel(text_config).eval() return vision_model, text_model def prepare_config_and_inputs(self): clip_model_tester = CLIPVisionModelTester(self) bert_model_tester = BertModelTester(self) vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs() text_config_and_inputs = bert_model_tester.prepare_config_and_inputs() vision_config, pixel_values = vision_config_and_inputs ( text_config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_torch class VisionTextDualEncoderIntegrationTest(unittest.TestCase): @slow def test_inference(self): model = VisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1) processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian") image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = processor( text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="pt" ) outputs = model(**inputs) # verify the logits self.assertEqual(outputs.logits_per_image.shape, 
(inputs.pixel_values.shape[0], inputs.input_ids.shape[0])) self.assertEqual( outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), ) expected_logits = torch.tensor([[1.2284727, 0.3104122]]) self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch VisionTextDualEncoder model. """ import collections import tempfile import unittest import numpy as np from transformers.testing_utils import is_pt_flax_cross_test, require_torch, require_vision, slow, torch_device from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_bert import BertModelTester from ..clip.test_modeling_clip import CLIPVisionModelTester from ..deit.test_modeling_deit import DeiTModelTester from ..roberta.test_modeling_roberta import RobertaModelTester from ..vit.test_modeling_vit import ViTModelTester if is_torch_available(): import torch from transformers import ( BertModel, CLIPVisionModel, DeiTModel, RobertaModel, VisionTextDualEncoderConfig, VisionTextDualEncoderModel, ViTModel, ) if is_flax_available(): from transformers import FlaxVisionTextDualEncoderModel from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor # Inspired by # https://github.com/rwightman/pytorch-image-models/blob/b9bd960a032c75ca6b808ddeed76bee5f3ed4972/timm/models/layers/helpers.py # From PyTorch internals def to_2tuple(x): if isinstance(x, collections.abc.Iterable): return x return (x, x) @require_torch class VisionTextDualEncoderMixin: def get_vision_text_model(self, config, text_config): pass def prepare_config_and_inputs(self): pass def get_pretrained_model_and_inputs(self): pass def check_model_from_pretrained_configs( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config) model = VisionTextDualEncoderModel(config) model.to(torch_device) model.eval() output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], config.projection_dim)) self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], config.projection_dim)) def check_vision_text_dual_encoder_model( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) model = VisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model) model.to(torch_device) model.eval() output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim)) def check_vision_text_dual_encoder_from_pretrained( self, 
text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) kwargs = {"vision_model": vision_model, "text_model": text_model} model = VisionTextDualEncoderModel.from_vision_text_pretrained(**kwargs) model.to(torch_device) model.eval() output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) self.assertEqual(output["text_embeds"].shape, (input_ids.shape[0], model.config.projection_dim)) self.assertEqual(output["image_embeds"].shape, (pixel_values.shape[0], model.config.projection_dim)) def check_save_load(self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) model = VisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model) model.to(torch_device) model.eval() with torch.no_grad(): output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) out_1 = output[0].cpu().numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = VisionTextDualEncoderModel.from_pretrained(tmpdirname).eval() model.to(torch_device) after_output = model(input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask) out_2 = after_output[0].cpu().numpy() max_diff = np.amax(np.abs(out_2 - out_1)) self.assertLessEqual(max_diff, 1e-5) def check_vision_text_output_attention( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) model = VisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model) model.to(torch_device) model.eval() output = model( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True ) vision_attentions = output.vision_model_output.attentions self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) image_size = to_2tuple(vision_model.config.image_size) patch_size = to_2tuple(vision_model.config.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len)) text_attentions = output.text_model_output.attentions self.assertEqual(len(text_attentions), text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), ) def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") def check_pt_flax_equivalence(self, pt_model, fx_model, input_ids, attention_mask, pixel_values, **kwargs): pt_model.to(torch_device) pt_model.eval() # prepare inputs inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values} pt_inputs = inputs_dict flax_inputs = {k: v.numpy() for k, v in pt_inputs.items()} with torch.no_grad(): pt_outputs = pt_model(**pt_inputs).to_tuple() fx_outputs = fx_model(**flax_inputs).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output in zip(fx_outputs[:4], 
pt_outputs[:4]): self.assert_almost_equals(fx_output, pt_output.numpy(), 4e-2) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = FlaxVisionTextDualEncoderModel.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**flax_inputs).to_tuple() self.assertEqual(len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch") for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4], pt_outputs[:4]): self.assert_almost_equals(fx_output_loaded, pt_output.numpy(), 4e-2) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = VisionTextDualEncoderModel.from_pretrained(tmpdirname, from_flax=True) pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple() self.assertEqual(len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch") for fx_output, pt_output_loaded in zip(fx_outputs[:4], pt_outputs_loaded[:4]): self.assert_almost_equals(fx_output, pt_output_loaded.numpy(), 4e-2) def check_equivalence_pt_to_flax(self, vision_config, text_config, inputs_dict): config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config) pt_model = VisionTextDualEncoderModel(config) fx_model = FlaxVisionTextDualEncoderModel(config) fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state self.check_pt_flax_equivalence(pt_model, fx_model, **inputs_dict) def check_equivalence_flax_to_pt(self, vision_config, text_config, inputs_dict): config = VisionTextDualEncoderConfig.from_vision_text_configs(vision_config, text_config) pt_model = VisionTextDualEncoderModel(config) fx_model = FlaxVisionTextDualEncoderModel(config) pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) self.check_pt_flax_equivalence(pt_model, fx_model, **inputs_dict) def test_vision_text_dual_encoder_model(self): inputs_dict = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**inputs_dict) def test_model_from_pretrained_configs(self): inputs_dict = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**inputs_dict) def test_vision_text_dual_encoder_from_pretrained(self): inputs_dict = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**inputs_dict) def test_save_load(self): inputs_dict = self.prepare_config_and_inputs() self.check_save_load(**inputs_dict) def test_vision_text_output_attention(self): inputs_dict = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**inputs_dict) @is_pt_flax_cross_test def test_pt_flax_equivalence(self): config_inputs_dict = self.prepare_config_and_inputs() vision_config = config_inputs_dict.pop("vision_config") text_config = config_inputs_dict.pop("text_config") inputs_dict = config_inputs_dict self.check_equivalence_pt_to_flax(vision_config, text_config, inputs_dict) self.check_equivalence_flax_to_pt(vision_config, text_config, inputs_dict) @slow def test_real_model_save_load_from_pretrained(self): model_2, inputs = self.get_pretrained_model_and_inputs() model_2.to(torch_device) with torch.no_grad(): outputs = model_2(**inputs) out_2 = outputs[0].cpu().numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_2.save_pretrained(tmp_dirname) model_1 = VisionTextDualEncoderModel.from_pretrained(tmp_dirname) model_1.to(torch_device) 
after_outputs = model_1(**inputs) out_1 = after_outputs[0].cpu().numpy() max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) @require_torch class ViTBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = VisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-vit", "hf-internal-testing/tiny-bert" ) batch_size = 13 pixel_values = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size) attention_mask = random_attention_mask([batch_size, 4]) inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def get_vision_text_model(self, vision_config, text_config): vision_model = ViTModel(vision_config).eval() text_model = BertModel(text_config).eval() return vision_model, text_model def prepare_config_and_inputs(self): vit_model_tester = ViTModelTester(self) bert_model_tester = BertModelTester(self) vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs() text_config_and_inputs = bert_model_tester.prepare_config_and_inputs() vision_config, pixel_values, _ = vision_config_and_inputs ( text_config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_torch class DeiTRobertaModelTest(VisionTextDualEncoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = VisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-deit", "hf-internal-testing/tiny-random-roberta" ) batch_size = 13 pixel_values = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size) attention_mask = random_attention_mask([batch_size, 4]) inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def check_vision_text_output_attention( self, text_config, input_ids, attention_mask, vision_config, pixel_values=None, **kwargs ): vision_model, text_model = self.get_vision_text_model(vision_config, text_config) model = VisionTextDualEncoderModel(vision_model=vision_model, text_model=text_model) model.to(torch_device) model.eval() output = model( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, output_attentions=True ) vision_attentions = output.vision_model_output.attentions self.assertEqual(len(vision_attentions), vision_config.num_hidden_layers) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) image_size = to_2tuple(vision_model.config.image_size) patch_size = to_2tuple(vision_model.config.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) seq_len = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:], (vision_config.num_attention_heads, seq_len, seq_len)) text_attentions = 
output.text_model_output.attentions self.assertEqual(len(text_attentions), text_config.num_hidden_layers) self.assertEqual( text_attentions[0].shape[-3:], (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]), ) def get_vision_text_model(self, vision_config, text_config): vision_model = DeiTModel(vision_config).eval() text_model = RobertaModel(text_config).eval() return vision_model, text_model def prepare_config_and_inputs(self): vit_model_tester = DeiTModelTester(self) bert_model_tester = RobertaModelTester(self) vision_config_and_inputs = vit_model_tester.prepare_config_and_inputs() text_config_and_inputs = bert_model_tester.prepare_config_and_inputs() vision_config, pixel_values, _ = vision_config_and_inputs ( text_config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } # skip as DeiT is not available in Flax def test_pt_flax_equivalence(self): pass @require_torch class CLIPVisionBertModelTest(VisionTextDualEncoderMixin, unittest.TestCase): def get_pretrained_model_and_inputs(self): model = VisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-clip", "hf-internal-testing/tiny-bert" ) batch_size = 13 pixel_values = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) input_ids = ids_tensor([batch_size, 4], model.text_model.config.vocab_size) attention_mask = random_attention_mask([batch_size, 4]) inputs = {"pixel_values": pixel_values, "input_ids": input_ids, "attention_mask": attention_mask} return model, inputs def get_vision_text_model(self, vision_config, text_config): vision_model = CLIPVisionModel(vision_config).eval() text_model = BertModel(text_config).eval() return vision_model, text_model def prepare_config_and_inputs(self): clip_model_tester = CLIPVisionModelTester(self) bert_model_tester = BertModelTester(self) vision_config_and_inputs = clip_model_tester.prepare_config_and_inputs() text_config_and_inputs = bert_model_tester.prepare_config_and_inputs() vision_config, pixel_values = vision_config_and_inputs ( text_config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_torch class VisionTextDualEncoderIntegrationTest(unittest.TestCase): @slow def test_inference(self): model = VisionTextDualEncoderModel.from_pretrained("clip-italian/clip-italian", logit_scale_init_value=1) processor = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian") image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = processor( text=["una foto di un gatto", "una foto di un cane"], images=image, padding=True, return_tensors="pt" ) outputs = model(**inputs) # verify the logits self.assertEqual(outputs.logits_per_image.shape, 
(inputs.pixel_values.shape[0], inputs.input_ids.shape[0])) self.assertEqual( outputs.logits_per_text.shape, (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]), ) expected_logits = torch.tensor([[1.2284727, 0.3104122]]) self.assertTrue(torch.allclose(outputs.logits_per_image, expected_logits, atol=1e-3))
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/blenderbot/test_tokenization_blenderbot.py
#!/usr/bin/env python3 # coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for Blenderbot Tokenizers, including common tests for BlenderbotSmallTokenizer.""" import unittest from transformers import BlenderbotTokenizer, BlenderbotTokenizerFast from transformers.utils import cached_property class Blenderbot3BTokenizerTests(unittest.TestCase): @cached_property def tokenizer_3b(self): return BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B") @cached_property def rust_tokenizer_3b(self): return BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B") def test_encode_decode_cycle(self): tok = self.tokenizer_3b src_text = " I am a small frog." encoded = tok([src_text], padding=False, truncation=False)["input_ids"] decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] assert src_text == decoded def test_encode_decode_cycle_rust_tokenizer(self): tok = self.rust_tokenizer_3b src_text = " I am a small frog." encoded = tok([src_text], padding=False, truncation=False)["input_ids"] decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] assert src_text == decoded def test_3B_tokenization_same_as_parlai(self): assert self.tokenizer_3b.add_prefix_space assert self.tokenizer_3b([" Sam", "Sam"]).input_ids == [[5502, 2], [5502, 2]] def test_3B_tokenization_same_as_parlai_rust_tokenizer(self): assert self.rust_tokenizer_3b.add_prefix_space assert self.rust_tokenizer_3b([" Sam", "Sam"]).input_ids == [[5502, 2], [5502, 2]]
#!/usr/bin/env python3 # coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for Blenderbot Tokenizers, including common tests for BlenderbotSmallTokenizer.""" import unittest from transformers import BlenderbotTokenizer, BlenderbotTokenizerFast from transformers.utils import cached_property class Blenderbot3BTokenizerTests(unittest.TestCase): @cached_property def tokenizer_3b(self): return BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B") @cached_property def rust_tokenizer_3b(self): return BlenderbotTokenizerFast.from_pretrained("facebook/blenderbot-3B") def test_encode_decode_cycle(self): tok = self.tokenizer_3b src_text = " I am a small frog." encoded = tok([src_text], padding=False, truncation=False)["input_ids"] decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] assert src_text == decoded def test_encode_decode_cycle_rust_tokenizer(self): tok = self.rust_tokenizer_3b src_text = " I am a small frog." encoded = tok([src_text], padding=False, truncation=False)["input_ids"] decoded = tok.batch_decode(encoded, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] assert src_text == decoded def test_3B_tokenization_same_as_parlai(self): assert self.tokenizer_3b.add_prefix_space assert self.tokenizer_3b([" Sam", "Sam"]).input_ids == [[5502, 2], [5502, 2]] def test_3B_tokenization_same_as_parlai_rust_tokenizer(self): assert self.rust_tokenizer_3b.add_prefix_space assert self.rust_tokenizer_3b([" Sam", "Sam"]).input_ids == [[5502, 2], [5502, 2]]
-1
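The PR description recorded above boils down to a one-line migration: floor division of a torch tensor with `//` emits the `__floordiv__` deprecation warning and should be rewritten with `torch.div(..., rounding_mode="floor")`. The sketch below illustrates that migration with made-up tensor names; it is not taken from any file changed by the PR (the files in the surrounding records are unchanged, which is why their before/after contents are identical).

```python
import torch

# Hypothetical example: map position indices to block indices with integer division.
positions = torch.arange(12)
block_size = 4

# Old style: triggers "__floordiv__ is deprecated ..." on PyTorch 1.8+.
block_ids_old = positions // block_size

# Migration suggested by the PR: explicit floor division via torch.div.
block_ids_new = torch.div(positions, block_size, rounding_mode="floor")

# For non-negative integers the two agree exactly.
assert torch.equal(block_ids_old, block_ids_new)
```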
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/retribert/configuration_retribert.py
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ RetriBERT model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) # TODO: upload to AWS RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "yjernite/retribert-base-uncased": ( "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json" ), } class RetriBertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`RetriBertModel`]. It is used to instantiate a RetriBertModel model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the RetriBERT [yjernite/retribert-base-uncased](https://huggingface.co/yjernite/retribert-base-uncased) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the RetriBERT model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`RetriBertModel`] hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the *token_type_ids* passed into [`BertModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. 
share_encoders (`bool`, *optional*, defaults to `True`): Whether or not to use the same Bert-type encoder for the queries and document projection_dim (`int`, *optional*, defaults to 128): Final dimension of the query and document representation after projection """ model_type = "retribert" def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs, ): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.share_encoders = share_encoders self.projection_dim = projection_dim
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ RetriBERT model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) # TODO: upload to AWS RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "yjernite/retribert-base-uncased": ( "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json" ), } class RetriBertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`RetriBertModel`]. It is used to instantiate a RetriBertModel model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the RetriBERT [yjernite/retribert-base-uncased](https://huggingface.co/yjernite/retribert-base-uncased) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the RetriBERT model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`RetriBertModel`] hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the *token_type_ids* passed into [`BertModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. 
share_encoders (`bool`, *optional*, defaults to `True`): Whether or not to use the same Bert-type encoder for the queries and document projection_dim (`int`, *optional*, defaults to 128): Final dimension of the query and document representation after projection """ model_type = "retribert" def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=8, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, share_encoders=True, projection_dim=128, pad_token_id=0, **kwargs, ): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.share_encoders = share_encoders self.projection_dim = projection_dim
-1
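As a quick orientation for this record, here is a hypothetical usage sketch of the configuration class documented above. The values are the defaults from the `__init__` signature shown in the file content; note that the class docstring says `num_hidden_layers` defaults to 12 while the code uses 8, and the sketch follows the code.

```python
from transformers import RetriBertConfig

# Build a config with the code defaults; any argument can be overridden by keyword.
config = RetriBertConfig()

print(config.hidden_size)        # 768
print(config.num_hidden_layers)  # 8 (code default; the docstring says 12)
print(config.projection_dim)     # 128 (dimension of the query/document projections)
print(config.share_encoders)     # True (queries and documents share one encoder)
```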
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/modelcard.py
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Configuration base class and utilities.""" import copy import json import os import warnings from dataclasses import dataclass from pathlib import Path from typing import Any, Dict, List, Optional, Union import requests import yaml from huggingface_hub import model_info from huggingface_hub.utils import HFValidationError from . import __version__ from .models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, ) from .training_args import ParallelMode from .utils import ( MODEL_CARD_NAME, cached_file, is_datasets_available, is_offline_mode, is_tf_available, is_tokenizers_available, is_torch_available, logging, ) TASK_MAPPING = { "text-generation": MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, "image-classification": MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, "image-segmentation": MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES, "fill-mask": MODEL_FOR_MASKED_LM_MAPPING_NAMES, "object-detection": MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, "question-answering": MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, "text2text-generation": MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, "text-classification": MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, "table-question-answering": MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES, "token-classification": MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, "audio-classification": MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, "automatic-speech-recognition": {**MODEL_FOR_CTC_MAPPING_NAMES, **MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES}, } logger = logging.get_logger(__name__) class ModelCard: r""" Structured Model Card class. Store model card as well as methods for loading/downloading/saving model cards. Please read the following paper for details and explanation on the sections: "Model Cards for Model Reporting" by Margaret Mitchell, Simone Wu, Andrew Zaldivar, Parker Barnes, Lucy Vasserman, Ben Hutchinson, Elena Spitzer, Inioluwa Deborah Raji and Timnit Gebru for the proposal behind model cards. Link: https://arxiv.org/abs/1810.03993 Note: A model card can be loaded and saved to disk. 
""" def __init__(self, **kwargs): warnings.warn( "The class `ModelCard` is deprecated and will be removed in version 5 of Transformers", FutureWarning ) # Recommended attributes from https://arxiv.org/abs/1810.03993 (see papers) self.model_details = kwargs.pop("model_details", {}) self.intended_use = kwargs.pop("intended_use", {}) self.factors = kwargs.pop("factors", {}) self.metrics = kwargs.pop("metrics", {}) self.evaluation_data = kwargs.pop("evaluation_data", {}) self.training_data = kwargs.pop("training_data", {}) self.quantitative_analyses = kwargs.pop("quantitative_analyses", {}) self.ethical_considerations = kwargs.pop("ethical_considerations", {}) self.caveats_and_recommendations = kwargs.pop("caveats_and_recommendations", {}) # Open additional attributes for key, value in kwargs.items(): try: setattr(self, key, value) except AttributeError as err: logger.error(f"Can't set {key} with value {value} for {self}") raise err def save_pretrained(self, save_directory_or_file): """Save a model card object to the directory or file `save_directory_or_file`.""" if os.path.isdir(save_directory_or_file): # If we save using the predefined names, we can load using `from_pretrained` output_model_card_file = os.path.join(save_directory_or_file, MODEL_CARD_NAME) else: output_model_card_file = save_directory_or_file self.to_json_file(output_model_card_file) logger.info(f"Model card saved in {output_model_card_file}") @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): r""" Instantiate a [`ModelCard`] from a pre-trained model model card. Parameters: pretrained_model_name_or_path: either: - a string, the *model id* of a pretrained model card hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`. - a path to a *directory* containing a model card file saved using the [`~ModelCard.save_pretrained`] method, e.g.: `./my_model_directory/`. - a path or url to a saved model card JSON *file*, e.g.: `./my_model_directory/modelcard.json`. cache_dir: (*optional*) string: Path to a directory in which a downloaded pre-trained model card should be cached if the standard cache should not be used. kwargs: (*optional*) dict: key/value pairs with which to update the ModelCard object after loading. - The values in kwargs of any keys which are model card attributes will be used to override the loaded values. - Behavior concerning key/value pairs whose keys are *not* model card attributes is controlled by the *return_unused_kwargs* keyword parameter. proxies: (*optional*) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. return_unused_kwargs: (*optional*) bool: - If False, then this function returns just the final model card object. - If True, then this functions returns a tuple *(model card, unused_kwargs)* where *unused_kwargs* is a dictionary consisting of the key/value pairs whose keys are not model card attributes: ie the part of kwargs which has not been used to update *ModelCard* and is otherwise ignored. Examples: ```python # Download model card from huggingface.co and cache. 
modelcard = ModelCard.from_pretrained("bert-base-uncased") # Model card was saved using *save_pretrained('./test/saved_model/')* modelcard = ModelCard.from_pretrained("./test/saved_model/") modelcard = ModelCard.from_pretrained("./test/saved_model/modelcard.json") modelcard = ModelCard.from_pretrained("bert-base-uncased", output_attentions=True, foo=False) ```""" cache_dir = kwargs.pop("cache_dir", None) proxies = kwargs.pop("proxies", None) return_unused_kwargs = kwargs.pop("return_unused_kwargs", False) from_pipeline = kwargs.pop("_from_pipeline", None) user_agent = {"file_type": "model_card"} if from_pipeline is not None: user_agent["using_pipeline"] = from_pipeline is_local = os.path.isdir(pretrained_model_name_or_path) if os.path.isfile(pretrained_model_name_or_path): resolved_model_card_file = pretrained_model_name_or_path is_local = True else: try: # Load from URL or cache if already cached resolved_model_card_file = cached_file( pretrained_model_name_or_path, filename=MODEL_CARD_NAME, cache_dir=cache_dir, proxies=proxies, user_agent=user_agent, ) if is_local: logger.info(f"loading model card file {resolved_model_card_file}") else: logger.info(f"loading model card file {MODEL_CARD_NAME} from cache at {resolved_model_card_file}") # Load model card modelcard = cls.from_json_file(resolved_model_card_file) except (EnvironmentError, json.JSONDecodeError): # We fall back on creating an empty model card modelcard = cls() # Update model card with kwargs if needed to_remove = [] for key, value in kwargs.items(): if hasattr(modelcard, key): setattr(modelcard, key, value) to_remove.append(key) for key in to_remove: kwargs.pop(key, None) logger.info(f"Model card: {modelcard}") if return_unused_kwargs: return modelcard, kwargs else: return modelcard @classmethod def from_dict(cls, json_object): """Constructs a `ModelCard` from a Python dictionary of parameters.""" return cls(**json_object) @classmethod def from_json_file(cls, json_file): """Constructs a `ModelCard` from a json file of parameters.""" with open(json_file, "r", encoding="utf-8") as reader: text = reader.read() dict_obj = json.loads(text) return cls(**dict_obj) def __eq__(self, other): return self.__dict__ == other.__dict__ def __repr__(self): return str(self.to_json_string()) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" def to_json_file(self, json_file_path): """Save this instance to a json file.""" with open(json_file_path, "w", encoding="utf-8") as writer: writer.write(self.to_json_string()) AUTOGENERATED_TRAINER_COMMENT = """ <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> """ AUTOGENERATED_KERAS_COMMENT = """ <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. 
--> """ TASK_TAG_TO_NAME_MAPPING = { "fill-mask": "Masked Language Modeling", "image-classification": "Image Classification", "image-segmentation": "Image Segmentation", "multiple-choice": "Multiple Choice", "object-detection": "Object Detection", "question-answering": "Question Answering", "summarization": "Summarization", "table-question-answering": "Table Question Answering", "text-classification": "Text Classification", "text-generation": "Causal Language Modeling", "text2text-generation": "Sequence-to-sequence Language Modeling", "token-classification": "Token Classification", "translation": "Translation", "zero-shot-classification": "Zero Shot Classification", "automatic-speech-recognition": "Automatic Speech Recognition", } METRIC_TAGS = [ "accuracy", "bleu", "f1", "matthews_correlation", "pearsonr", "precision", "recall", "rouge", "sacrebleu", "spearmanr", "wer", ] def _listify(obj): if obj is None: return [] elif isinstance(obj, str): return [obj] else: return obj def _insert_values_as_list(metadata, name, values): if values is None: return metadata if isinstance(values, str): values = [values] values = [v for v in values if v is not None] if len(values) == 0: return metadata metadata[name] = values return metadata def infer_metric_tags_from_eval_results(eval_results): if eval_results is None: return {} result = {} for key in eval_results.keys(): if key.lower().replace(" ", "_") in METRIC_TAGS: result[key.lower().replace(" ", "_")] = key elif key.lower() == "rouge1": result["rouge"] = key return result def _insert_value(metadata, name, value): if value is None: return metadata metadata[name] = value return metadata def is_hf_dataset(dataset): if not is_datasets_available(): return False from datasets import Dataset, IterableDataset return isinstance(dataset, (Dataset, IterableDataset)) def _get_mapping_values(mapping): result = [] for v in mapping.values(): if isinstance(v, (tuple, list)): result += list(v) else: result.append(v) return result @dataclass class TrainingSummary: model_name: str language: Optional[Union[str, List[str]]] = None license: Optional[str] = None tags: Optional[Union[str, List[str]]] = None finetuned_from: Optional[str] = None tasks: Optional[Union[str, List[str]]] = None dataset: Optional[Union[str, List[str]]] = None dataset_tags: Optional[Union[str, List[str]]] = None dataset_args: Optional[Union[str, List[str]]] = None dataset_metadata: Optional[Dict[str, Any]] = None eval_results: Optional[Dict[str, float]] = None eval_lines: Optional[List[str]] = None hyperparameters: Optional[Dict[str, Any]] = None source: Optional[str] = "trainer" def __post_init__(self): # Infer default license from the checkpoint used, if possible. 
if ( self.license is None and not is_offline_mode() and self.finetuned_from is not None and len(self.finetuned_from) > 0 ): try: info = model_info(self.finetuned_from) for tag in info.tags: if tag.startswith("license:"): self.license = tag[8:] except (requests.exceptions.HTTPError, HFValidationError): pass def create_model_index(self, metric_mapping): model_index = {"name": self.model_name} # Dataset mapping tag -> name dataset_names = _listify(self.dataset) dataset_tags = _listify(self.dataset_tags) dataset_args = _listify(self.dataset_args) dataset_metadata = _listify(self.dataset_metadata) if len(dataset_args) < len(dataset_tags): dataset_args = dataset_args + [None] * (len(dataset_tags) - len(dataset_args)) dataset_mapping = {tag: name for tag, name in zip(dataset_tags, dataset_names)} dataset_arg_mapping = {tag: arg for tag, arg in zip(dataset_tags, dataset_args)} dataset_metadata_mapping = {tag: metadata for tag, metadata in zip(dataset_tags, dataset_metadata)} task_mapping = { task: TASK_TAG_TO_NAME_MAPPING[task] for task in _listify(self.tasks) if task in TASK_TAG_TO_NAME_MAPPING } model_index["results"] = [] if len(task_mapping) == 0 and len(dataset_mapping) == 0: return [model_index] if len(task_mapping) == 0: task_mapping = {None: None} if len(dataset_mapping) == 0: dataset_mapping = {None: None} # One entry per dataset and per task all_possibilities = [(task_tag, ds_tag) for task_tag in task_mapping for ds_tag in dataset_mapping] for task_tag, ds_tag in all_possibilities: result = {} if task_tag is not None: result["task"] = {"name": task_mapping[task_tag], "type": task_tag} if ds_tag is not None: metadata = dataset_metadata_mapping.get(ds_tag, {}) result["dataset"] = { "name": dataset_mapping[ds_tag], "type": ds_tag, **metadata, } if dataset_arg_mapping[ds_tag] is not None: result["dataset"]["args"] = dataset_arg_mapping[ds_tag] if len(metric_mapping) > 0: result["metrics"] = [] for metric_tag, metric_name in metric_mapping.items(): result["metrics"].append( { "name": metric_name, "type": metric_tag, "value": self.eval_results[metric_name], } ) # Remove partial results to avoid the model card being rejected. if "task" in result and "dataset" in result and "metrics" in result: model_index["results"].append(result) else: logger.info(f"Dropping the following result as it does not have all the necessary fields:\n{result}") return [model_index] def create_metadata(self): metric_mapping = infer_metric_tags_from_eval_results(self.eval_results) metadata = {} metadata = _insert_values_as_list(metadata, "language", self.language) metadata = _insert_value(metadata, "license", self.license) metadata = _insert_values_as_list(metadata, "tags", self.tags) metadata = _insert_values_as_list(metadata, "datasets", self.dataset_tags) metadata = _insert_values_as_list(metadata, "metrics", list(metric_mapping.keys())) metadata["model-index"] = self.create_model_index(metric_mapping) return metadata def to_model_card(self): model_card = "" metadata = yaml.dump(self.create_metadata(), sort_keys=False) if len(metadata) > 0: model_card = f"---\n{metadata}---\n" # Now the model card for realsies. 
if self.source == "trainer": model_card += AUTOGENERATED_TRAINER_COMMENT else: model_card += AUTOGENERATED_KERAS_COMMENT model_card += f"\n# {self.model_name}\n\n" if self.finetuned_from is None: model_card += "This model was trained from scratch on " else: model_card += ( "This model is a fine-tuned version of" f" [{self.finetuned_from}](https://huggingface.co/{self.finetuned_from}) on " ) if self.dataset is None: model_card += "an unknown dataset." else: if isinstance(self.dataset, str): model_card += f"the {self.dataset} dataset." elif isinstance(self.dataset, (tuple, list)) and len(self.dataset) == 1: model_card += f"the {self.dataset[0]} dataset." else: model_card += ( ", ".join([f"the {ds}" for ds in self.dataset[:-1]]) + f" and the {self.dataset[-1]} datasets." ) if self.eval_results is not None: model_card += "\nIt achieves the following results on the evaluation set:\n" model_card += "\n".join([f"- {name}: {_maybe_round(value)}" for name, value in self.eval_results.items()]) model_card += "\n" model_card += "\n## Model description\n\nMore information needed\n" model_card += "\n## Intended uses & limitations\n\nMore information needed\n" model_card += "\n## Training and evaluation data\n\nMore information needed\n" model_card += "\n## Training procedure\n" model_card += "\n### Training hyperparameters\n" if self.hyperparameters is not None: model_card += "\nThe following hyperparameters were used during training:\n" model_card += "\n".join([f"- {name}: {value}" for name, value in self.hyperparameters.items()]) model_card += "\n" else: model_card += "\nMore information needed\n" if self.eval_lines is not None: model_card += "\n### Training results\n\n" model_card += make_markdown_table(self.eval_lines) model_card += "\n" model_card += "\n### Framework versions\n\n" model_card += f"- Transformers {__version__}\n" if self.source == "trainer" and is_torch_available(): import torch model_card += f"- Pytorch {torch.__version__}\n" elif self.source == "keras" and is_tf_available(): import tensorflow as tf model_card += f"- TensorFlow {tf.__version__}\n" if is_datasets_available(): import datasets model_card += f"- Datasets {datasets.__version__}\n" if is_tokenizers_available(): import tokenizers model_card += f"- Tokenizers {tokenizers.__version__}\n" return model_card @classmethod def from_trainer( cls, trainer, language=None, license=None, tags=None, model_name=None, finetuned_from=None, tasks=None, dataset_tags=None, dataset_metadata=None, dataset=None, dataset_args=None, ): # Infer default from dataset one_dataset = trainer.eval_dataset if trainer.eval_dataset is not None else trainer.train_dataset if is_hf_dataset(one_dataset) and (dataset_tags is None or dataset_args is None or dataset_metadata is None): default_tag = one_dataset.builder_name # Those are not real datasets from the Hub so we exclude them. 
if default_tag not in ["csv", "json", "pandas", "parquet", "text"]: if dataset_metadata is None: dataset_metadata = [{"config": one_dataset.config_name, "split": str(one_dataset.split)}] if dataset_tags is None: dataset_tags = [default_tag] if dataset_args is None: dataset_args = [one_dataset.config_name] if dataset is None and dataset_tags is not None: dataset = dataset_tags # Infer default finetuned_from if ( finetuned_from is None and hasattr(trainer.model.config, "_name_or_path") and not os.path.isdir(trainer.model.config._name_or_path) ): finetuned_from = trainer.model.config._name_or_path # Infer default task tag: if tasks is None: model_class_name = trainer.model.__class__.__name__ for task, mapping in TASK_MAPPING.items(): if model_class_name in _get_mapping_values(mapping): tasks = task if model_name is None: model_name = Path(trainer.args.output_dir).name if len(model_name) == 0: model_name = finetuned_from # Add `generated_from_trainer` to the tags if tags is None: tags = ["generated_from_trainer"] elif isinstance(tags, str) and tags != "generated_from_trainer": tags = [tags, "generated_from_trainer"] elif "generated_from_trainer" not in tags: tags.append("generated_from_trainer") _, eval_lines, eval_results = parse_log_history(trainer.state.log_history) hyperparameters = extract_hyperparameters_from_trainer(trainer) return cls( language=language, license=license, tags=tags, model_name=model_name, finetuned_from=finetuned_from, tasks=tasks, dataset=dataset, dataset_tags=dataset_tags, dataset_args=dataset_args, dataset_metadata=dataset_metadata, eval_results=eval_results, eval_lines=eval_lines, hyperparameters=hyperparameters, ) @classmethod def from_keras( cls, model, model_name, keras_history=None, language=None, license=None, tags=None, finetuned_from=None, tasks=None, dataset_tags=None, dataset=None, dataset_args=None, ): # Infer default from dataset if dataset is not None: if is_hf_dataset(dataset) and (dataset_tags is None or dataset_args is None): default_tag = dataset.builder_name # Those are not real datasets from the Hub so we exclude them. 
if default_tag not in ["csv", "json", "pandas", "parquet", "text"]: if dataset_tags is None: dataset_tags = [default_tag] if dataset_args is None: dataset_args = [dataset.config_name] if dataset is None and dataset_tags is not None: dataset = dataset_tags # Infer default finetuned_from if ( finetuned_from is None and hasattr(model.config, "_name_or_path") and not os.path.isdir(model.config._name_or_path) ): finetuned_from = model.config._name_or_path # Infer default task tag: if tasks is None: model_class_name = model.__class__.__name__ for task, mapping in TASK_MAPPING.items(): if model_class_name in _get_mapping_values(mapping): tasks = task # Add `generated_from_keras_callback` to the tags if tags is None: tags = ["generated_from_keras_callback"] elif isinstance(tags, str) and tags != "generated_from_keras_callback": tags = [tags, "generated_from_keras_callback"] elif "generated_from_keras_callback" not in tags: tags.append("generated_from_keras_callback") if keras_history is not None: _, eval_lines, eval_results = parse_keras_history(keras_history) else: eval_lines = [] eval_results = {} hyperparameters = extract_hyperparameters_from_keras(model) return cls( language=language, license=license, tags=tags, model_name=model_name, finetuned_from=finetuned_from, tasks=tasks, dataset_tags=dataset_tags, dataset=dataset, dataset_args=dataset_args, eval_results=eval_results, eval_lines=eval_lines, hyperparameters=hyperparameters, source="keras", ) def parse_keras_history(logs): """ Parse the `logs` of either a `tf.keras.History` object returned by `model.fit()` or an accumulated logs `dict` passed to the `PushToHubCallback`. Returns lines and logs compatible with those returned by `parse_log_history`. """ if hasattr(logs, "history"): # This looks like a `History` object if not hasattr(logs, "epoch"): # This history looks empty, return empty results return None, [], {} logs.history["epoch"] = logs.epoch logs = logs.history else: # Training logs is a list of dicts, let's invert it to a dict of lists to match a History object logs = {log_key: [single_dict[log_key] for single_dict in logs] for log_key in logs[0]} lines = [] for i in range(len(logs["epoch"])): epoch_dict = {log_key: log_value_list[i] for log_key, log_value_list in logs.items()} values = {} for k, v in epoch_dict.items(): if k.startswith("val_"): k = "validation_" + k[4:] elif k != "epoch": k = "train_" + k splits = k.split("_") name = " ".join([part.capitalize() for part in splits]) values[name] = v lines.append(values) eval_results = lines[-1] return logs, lines, eval_results def parse_log_history(log_history): """ Parse the `log_history` of a Trainer to get the intermediate and final evaluation results. 
""" idx = 0 while idx < len(log_history) and "train_runtime" not in log_history[idx]: idx += 1 # If there are no training logs if idx == len(log_history): idx -= 1 while idx >= 0 and "eval_loss" not in log_history[idx]: idx -= 1 if idx >= 0: return None, None, log_history[idx] else: return None, None, None # From now one we can assume we have training logs: train_log = log_history[idx] lines = [] training_loss = "No log" for i in range(idx): if "loss" in log_history[i]: training_loss = log_history[i]["loss"] if "eval_loss" in log_history[i]: metrics = log_history[i].copy() _ = metrics.pop("total_flos", None) epoch = metrics.pop("epoch", None) step = metrics.pop("step", None) _ = metrics.pop("eval_runtime", None) _ = metrics.pop("eval_samples_per_second", None) _ = metrics.pop("eval_steps_per_second", None) _ = metrics.pop("eval_jit_compilation_time", None) values = {"Training Loss": training_loss, "Epoch": epoch, "Step": step} for k, v in metrics.items(): if k == "eval_loss": values["Validation Loss"] = v else: splits = k.split("_") name = " ".join([part.capitalize() for part in splits[1:]]) values[name] = v lines.append(values) idx = len(log_history) - 1 while idx >= 0 and "eval_loss" not in log_history[idx]: idx -= 1 if idx > 0: eval_results = {} for key, value in log_history[idx].items(): if key.startswith("eval_"): key = key[5:] if key not in ["runtime", "samples_per_second", "steps_per_second", "epoch", "step"]: camel_cased_key = " ".join([part.capitalize() for part in key.split("_")]) eval_results[camel_cased_key] = value return train_log, lines, eval_results else: return train_log, lines, None def extract_hyperparameters_from_keras(model): import tensorflow as tf hyperparameters = {} if hasattr(model, "optimizer") and model.optimizer is not None: hyperparameters["optimizer"] = model.optimizer.get_config() else: hyperparameters["optimizer"] = None hyperparameters["training_precision"] = tf.keras.mixed_precision.global_policy().name return hyperparameters def _maybe_round(v, decimals=4): if isinstance(v, float) and len(str(v).split(".")) > 1 and len(str(v).split(".")[1]) > decimals: return f"{v:.{decimals}f}" return str(v) def _regular_table_line(values, col_widths): values_with_space = [f"| {v}" + " " * (w - len(v) + 1) for v, w in zip(values, col_widths)] return "".join(values_with_space) + "|\n" def _second_table_line(col_widths): values = ["|:" + "-" * w + ":" for w in col_widths] return "".join(values) + "|\n" def make_markdown_table(lines): """ Create a nice Markdown table from the results in `lines`. 
""" if lines is None or len(lines) == 0: return "" col_widths = {key: len(str(key)) for key in lines[0].keys()} for line in lines: for key, value in line.items(): if col_widths[key] < len(_maybe_round(value)): col_widths[key] = len(_maybe_round(value)) table = _regular_table_line(list(lines[0].keys()), list(col_widths.values())) table += _second_table_line(list(col_widths.values())) for line in lines: table += _regular_table_line([_maybe_round(v) for v in line.values()], list(col_widths.values())) return table _TRAINING_ARGS_KEYS = [ "learning_rate", "train_batch_size", "eval_batch_size", "seed", ] def extract_hyperparameters_from_trainer(trainer): hyperparameters = {k: getattr(trainer.args, k) for k in _TRAINING_ARGS_KEYS} if trainer.args.parallel_mode not in [ParallelMode.NOT_PARALLEL, ParallelMode.NOT_DISTRIBUTED]: hyperparameters["distributed_type"] = ( "multi-GPU" if trainer.args.parallel_mode == ParallelMode.DISTRIBUTED else trainer.args.parallel_mode.value ) if trainer.args.world_size > 1: hyperparameters["num_devices"] = trainer.args.world_size if trainer.args.gradient_accumulation_steps > 1: hyperparameters["gradient_accumulation_steps"] = trainer.args.gradient_accumulation_steps total_train_batch_size = ( trainer.args.train_batch_size * trainer.args.world_size * trainer.args.gradient_accumulation_steps ) if total_train_batch_size != hyperparameters["train_batch_size"]: hyperparameters["total_train_batch_size"] = total_train_batch_size total_eval_batch_size = trainer.args.eval_batch_size * trainer.args.world_size if total_eval_batch_size != hyperparameters["eval_batch_size"]: hyperparameters["total_eval_batch_size"] = total_eval_batch_size if trainer.args.adafactor: hyperparameters["optimizer"] = "Adafactor" else: hyperparameters["optimizer"] = ( f"Adam with betas=({trainer.args.adam_beta1},{trainer.args.adam_beta2}) and" f" epsilon={trainer.args.adam_epsilon}" ) hyperparameters["lr_scheduler_type"] = trainer.args.lr_scheduler_type.value if trainer.args.warmup_ratio != 0.0: hyperparameters["lr_scheduler_warmup_ratio"] = trainer.args.warmup_ratio if trainer.args.warmup_steps != 0.0: hyperparameters["lr_scheduler_warmup_steps"] = trainer.args.warmup_steps if trainer.args.max_steps != -1: hyperparameters["training_steps"] = trainer.args.max_steps else: hyperparameters["num_epochs"] = trainer.args.num_train_epochs if trainer.args.fp16: if trainer.use_cuda_amp: hyperparameters["mixed_precision_training"] = "Native AMP" elif trainer.use_apex: hyperparameters["mixed_precision_training"] = f"Apex, opt level {trainer.args.fp16_opt_level}" if trainer.args.label_smoothing_factor != 0.0: hyperparameters["label_smoothing_factor"] = trainer.args.label_smoothing_factor return hyperparameters
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Configuration base class and utilities.""" import copy import json import os import warnings from dataclasses import dataclass from pathlib import Path from typing import Any, Dict, List, Optional, Union import requests import yaml from huggingface_hub import model_info from huggingface_hub.utils import HFValidationError from . import __version__ from .models.auto.modeling_auto import ( MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_CTC_MAPPING_NAMES, MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES, MODEL_FOR_MASKED_LM_MAPPING_NAMES, MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES, MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, ) from .training_args import ParallelMode from .utils import ( MODEL_CARD_NAME, cached_file, is_datasets_available, is_offline_mode, is_tf_available, is_tokenizers_available, is_torch_available, logging, ) TASK_MAPPING = { "text-generation": MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, "image-classification": MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, "image-segmentation": MODEL_FOR_IMAGE_SEGMENTATION_MAPPING_NAMES, "fill-mask": MODEL_FOR_MASKED_LM_MAPPING_NAMES, "object-detection": MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, "question-answering": MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, "text2text-generation": MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, "text-classification": MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, "table-question-answering": MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING_NAMES, "token-classification": MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, "audio-classification": MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, "automatic-speech-recognition": {**MODEL_FOR_CTC_MAPPING_NAMES, **MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES}, } logger = logging.get_logger(__name__) class ModelCard: r""" Structured Model Card class. Store model card as well as methods for loading/downloading/saving model cards. Please read the following paper for details and explanation on the sections: "Model Cards for Model Reporting" by Margaret Mitchell, Simone Wu, Andrew Zaldivar, Parker Barnes, Lucy Vasserman, Ben Hutchinson, Elena Spitzer, Inioluwa Deborah Raji and Timnit Gebru for the proposal behind model cards. Link: https://arxiv.org/abs/1810.03993 Note: A model card can be loaded and saved to disk. 
""" def __init__(self, **kwargs): warnings.warn( "The class `ModelCard` is deprecated and will be removed in version 5 of Transformers", FutureWarning ) # Recommended attributes from https://arxiv.org/abs/1810.03993 (see papers) self.model_details = kwargs.pop("model_details", {}) self.intended_use = kwargs.pop("intended_use", {}) self.factors = kwargs.pop("factors", {}) self.metrics = kwargs.pop("metrics", {}) self.evaluation_data = kwargs.pop("evaluation_data", {}) self.training_data = kwargs.pop("training_data", {}) self.quantitative_analyses = kwargs.pop("quantitative_analyses", {}) self.ethical_considerations = kwargs.pop("ethical_considerations", {}) self.caveats_and_recommendations = kwargs.pop("caveats_and_recommendations", {}) # Open additional attributes for key, value in kwargs.items(): try: setattr(self, key, value) except AttributeError as err: logger.error(f"Can't set {key} with value {value} for {self}") raise err def save_pretrained(self, save_directory_or_file): """Save a model card object to the directory or file `save_directory_or_file`.""" if os.path.isdir(save_directory_or_file): # If we save using the predefined names, we can load using `from_pretrained` output_model_card_file = os.path.join(save_directory_or_file, MODEL_CARD_NAME) else: output_model_card_file = save_directory_or_file self.to_json_file(output_model_card_file) logger.info(f"Model card saved in {output_model_card_file}") @classmethod def from_pretrained(cls, pretrained_model_name_or_path, **kwargs): r""" Instantiate a [`ModelCard`] from a pre-trained model model card. Parameters: pretrained_model_name_or_path: either: - a string, the *model id* of a pretrained model card hosted inside a model repo on huggingface.co. Valid model ids can be located at the root-level, like `bert-base-uncased`, or namespaced under a user or organization name, like `dbmdz/bert-base-german-cased`. - a path to a *directory* containing a model card file saved using the [`~ModelCard.save_pretrained`] method, e.g.: `./my_model_directory/`. - a path or url to a saved model card JSON *file*, e.g.: `./my_model_directory/modelcard.json`. cache_dir: (*optional*) string: Path to a directory in which a downloaded pre-trained model card should be cached if the standard cache should not be used. kwargs: (*optional*) dict: key/value pairs with which to update the ModelCard object after loading. - The values in kwargs of any keys which are model card attributes will be used to override the loaded values. - Behavior concerning key/value pairs whose keys are *not* model card attributes is controlled by the *return_unused_kwargs* keyword parameter. proxies: (*optional*) dict, default None: A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}. The proxies are used on each request. return_unused_kwargs: (*optional*) bool: - If False, then this function returns just the final model card object. - If True, then this functions returns a tuple *(model card, unused_kwargs)* where *unused_kwargs* is a dictionary consisting of the key/value pairs whose keys are not model card attributes: ie the part of kwargs which has not been used to update *ModelCard* and is otherwise ignored. Examples: ```python # Download model card from huggingface.co and cache. 
modelcard = ModelCard.from_pretrained("bert-base-uncased") # Model card was saved using *save_pretrained('./test/saved_model/')* modelcard = ModelCard.from_pretrained("./test/saved_model/") modelcard = ModelCard.from_pretrained("./test/saved_model/modelcard.json") modelcard = ModelCard.from_pretrained("bert-base-uncased", output_attentions=True, foo=False) ```""" cache_dir = kwargs.pop("cache_dir", None) proxies = kwargs.pop("proxies", None) return_unused_kwargs = kwargs.pop("return_unused_kwargs", False) from_pipeline = kwargs.pop("_from_pipeline", None) user_agent = {"file_type": "model_card"} if from_pipeline is not None: user_agent["using_pipeline"] = from_pipeline is_local = os.path.isdir(pretrained_model_name_or_path) if os.path.isfile(pretrained_model_name_or_path): resolved_model_card_file = pretrained_model_name_or_path is_local = True else: try: # Load from URL or cache if already cached resolved_model_card_file = cached_file( pretrained_model_name_or_path, filename=MODEL_CARD_NAME, cache_dir=cache_dir, proxies=proxies, user_agent=user_agent, ) if is_local: logger.info(f"loading model card file {resolved_model_card_file}") else: logger.info(f"loading model card file {MODEL_CARD_NAME} from cache at {resolved_model_card_file}") # Load model card modelcard = cls.from_json_file(resolved_model_card_file) except (EnvironmentError, json.JSONDecodeError): # We fall back on creating an empty model card modelcard = cls() # Update model card with kwargs if needed to_remove = [] for key, value in kwargs.items(): if hasattr(modelcard, key): setattr(modelcard, key, value) to_remove.append(key) for key in to_remove: kwargs.pop(key, None) logger.info(f"Model card: {modelcard}") if return_unused_kwargs: return modelcard, kwargs else: return modelcard @classmethod def from_dict(cls, json_object): """Constructs a `ModelCard` from a Python dictionary of parameters.""" return cls(**json_object) @classmethod def from_json_file(cls, json_file): """Constructs a `ModelCard` from a json file of parameters.""" with open(json_file, "r", encoding="utf-8") as reader: text = reader.read() dict_obj = json.loads(text) return cls(**dict_obj) def __eq__(self, other): return self.__dict__ == other.__dict__ def __repr__(self): return str(self.to_json_string()) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" def to_json_file(self, json_file_path): """Save this instance to a json file.""" with open(json_file_path, "w", encoding="utf-8") as writer: writer.write(self.to_json_string()) AUTOGENERATED_TRAINER_COMMENT = """ <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> """ AUTOGENERATED_KERAS_COMMENT = """ <!-- This model card has been generated automatically according to the information Keras had access to. You should probably proofread and complete it, then remove this comment. 
--> """ TASK_TAG_TO_NAME_MAPPING = { "fill-mask": "Masked Language Modeling", "image-classification": "Image Classification", "image-segmentation": "Image Segmentation", "multiple-choice": "Multiple Choice", "object-detection": "Object Detection", "question-answering": "Question Answering", "summarization": "Summarization", "table-question-answering": "Table Question Answering", "text-classification": "Text Classification", "text-generation": "Causal Language Modeling", "text2text-generation": "Sequence-to-sequence Language Modeling", "token-classification": "Token Classification", "translation": "Translation", "zero-shot-classification": "Zero Shot Classification", "automatic-speech-recognition": "Automatic Speech Recognition", } METRIC_TAGS = [ "accuracy", "bleu", "f1", "matthews_correlation", "pearsonr", "precision", "recall", "rouge", "sacrebleu", "spearmanr", "wer", ] def _listify(obj): if obj is None: return [] elif isinstance(obj, str): return [obj] else: return obj def _insert_values_as_list(metadata, name, values): if values is None: return metadata if isinstance(values, str): values = [values] values = [v for v in values if v is not None] if len(values) == 0: return metadata metadata[name] = values return metadata def infer_metric_tags_from_eval_results(eval_results): if eval_results is None: return {} result = {} for key in eval_results.keys(): if key.lower().replace(" ", "_") in METRIC_TAGS: result[key.lower().replace(" ", "_")] = key elif key.lower() == "rouge1": result["rouge"] = key return result def _insert_value(metadata, name, value): if value is None: return metadata metadata[name] = value return metadata def is_hf_dataset(dataset): if not is_datasets_available(): return False from datasets import Dataset, IterableDataset return isinstance(dataset, (Dataset, IterableDataset)) def _get_mapping_values(mapping): result = [] for v in mapping.values(): if isinstance(v, (tuple, list)): result += list(v) else: result.append(v) return result @dataclass class TrainingSummary: model_name: str language: Optional[Union[str, List[str]]] = None license: Optional[str] = None tags: Optional[Union[str, List[str]]] = None finetuned_from: Optional[str] = None tasks: Optional[Union[str, List[str]]] = None dataset: Optional[Union[str, List[str]]] = None dataset_tags: Optional[Union[str, List[str]]] = None dataset_args: Optional[Union[str, List[str]]] = None dataset_metadata: Optional[Dict[str, Any]] = None eval_results: Optional[Dict[str, float]] = None eval_lines: Optional[List[str]] = None hyperparameters: Optional[Dict[str, Any]] = None source: Optional[str] = "trainer" def __post_init__(self): # Infer default license from the checkpoint used, if possible. 
if ( self.license is None and not is_offline_mode() and self.finetuned_from is not None and len(self.finetuned_from) > 0 ): try: info = model_info(self.finetuned_from) for tag in info.tags: if tag.startswith("license:"): self.license = tag[8:] except (requests.exceptions.HTTPError, HFValidationError): pass def create_model_index(self, metric_mapping): model_index = {"name": self.model_name} # Dataset mapping tag -> name dataset_names = _listify(self.dataset) dataset_tags = _listify(self.dataset_tags) dataset_args = _listify(self.dataset_args) dataset_metadata = _listify(self.dataset_metadata) if len(dataset_args) < len(dataset_tags): dataset_args = dataset_args + [None] * (len(dataset_tags) - len(dataset_args)) dataset_mapping = {tag: name for tag, name in zip(dataset_tags, dataset_names)} dataset_arg_mapping = {tag: arg for tag, arg in zip(dataset_tags, dataset_args)} dataset_metadata_mapping = {tag: metadata for tag, metadata in zip(dataset_tags, dataset_metadata)} task_mapping = { task: TASK_TAG_TO_NAME_MAPPING[task] for task in _listify(self.tasks) if task in TASK_TAG_TO_NAME_MAPPING } model_index["results"] = [] if len(task_mapping) == 0 and len(dataset_mapping) == 0: return [model_index] if len(task_mapping) == 0: task_mapping = {None: None} if len(dataset_mapping) == 0: dataset_mapping = {None: None} # One entry per dataset and per task all_possibilities = [(task_tag, ds_tag) for task_tag in task_mapping for ds_tag in dataset_mapping] for task_tag, ds_tag in all_possibilities: result = {} if task_tag is not None: result["task"] = {"name": task_mapping[task_tag], "type": task_tag} if ds_tag is not None: metadata = dataset_metadata_mapping.get(ds_tag, {}) result["dataset"] = { "name": dataset_mapping[ds_tag], "type": ds_tag, **metadata, } if dataset_arg_mapping[ds_tag] is not None: result["dataset"]["args"] = dataset_arg_mapping[ds_tag] if len(metric_mapping) > 0: result["metrics"] = [] for metric_tag, metric_name in metric_mapping.items(): result["metrics"].append( { "name": metric_name, "type": metric_tag, "value": self.eval_results[metric_name], } ) # Remove partial results to avoid the model card being rejected. if "task" in result and "dataset" in result and "metrics" in result: model_index["results"].append(result) else: logger.info(f"Dropping the following result as it does not have all the necessary fields:\n{result}") return [model_index] def create_metadata(self): metric_mapping = infer_metric_tags_from_eval_results(self.eval_results) metadata = {} metadata = _insert_values_as_list(metadata, "language", self.language) metadata = _insert_value(metadata, "license", self.license) metadata = _insert_values_as_list(metadata, "tags", self.tags) metadata = _insert_values_as_list(metadata, "datasets", self.dataset_tags) metadata = _insert_values_as_list(metadata, "metrics", list(metric_mapping.keys())) metadata["model-index"] = self.create_model_index(metric_mapping) return metadata def to_model_card(self): model_card = "" metadata = yaml.dump(self.create_metadata(), sort_keys=False) if len(metadata) > 0: model_card = f"---\n{metadata}---\n" # Now the model card for realsies. 
if self.source == "trainer": model_card += AUTOGENERATED_TRAINER_COMMENT else: model_card += AUTOGENERATED_KERAS_COMMENT model_card += f"\n# {self.model_name}\n\n" if self.finetuned_from is None: model_card += "This model was trained from scratch on " else: model_card += ( "This model is a fine-tuned version of" f" [{self.finetuned_from}](https://huggingface.co/{self.finetuned_from}) on " ) if self.dataset is None: model_card += "an unknown dataset." else: if isinstance(self.dataset, str): model_card += f"the {self.dataset} dataset." elif isinstance(self.dataset, (tuple, list)) and len(self.dataset) == 1: model_card += f"the {self.dataset[0]} dataset." else: model_card += ( ", ".join([f"the {ds}" for ds in self.dataset[:-1]]) + f" and the {self.dataset[-1]} datasets." ) if self.eval_results is not None: model_card += "\nIt achieves the following results on the evaluation set:\n" model_card += "\n".join([f"- {name}: {_maybe_round(value)}" for name, value in self.eval_results.items()]) model_card += "\n" model_card += "\n## Model description\n\nMore information needed\n" model_card += "\n## Intended uses & limitations\n\nMore information needed\n" model_card += "\n## Training and evaluation data\n\nMore information needed\n" model_card += "\n## Training procedure\n" model_card += "\n### Training hyperparameters\n" if self.hyperparameters is not None: model_card += "\nThe following hyperparameters were used during training:\n" model_card += "\n".join([f"- {name}: {value}" for name, value in self.hyperparameters.items()]) model_card += "\n" else: model_card += "\nMore information needed\n" if self.eval_lines is not None: model_card += "\n### Training results\n\n" model_card += make_markdown_table(self.eval_lines) model_card += "\n" model_card += "\n### Framework versions\n\n" model_card += f"- Transformers {__version__}\n" if self.source == "trainer" and is_torch_available(): import torch model_card += f"- Pytorch {torch.__version__}\n" elif self.source == "keras" and is_tf_available(): import tensorflow as tf model_card += f"- TensorFlow {tf.__version__}\n" if is_datasets_available(): import datasets model_card += f"- Datasets {datasets.__version__}\n" if is_tokenizers_available(): import tokenizers model_card += f"- Tokenizers {tokenizers.__version__}\n" return model_card @classmethod def from_trainer( cls, trainer, language=None, license=None, tags=None, model_name=None, finetuned_from=None, tasks=None, dataset_tags=None, dataset_metadata=None, dataset=None, dataset_args=None, ): # Infer default from dataset one_dataset = trainer.eval_dataset if trainer.eval_dataset is not None else trainer.train_dataset if is_hf_dataset(one_dataset) and (dataset_tags is None or dataset_args is None or dataset_metadata is None): default_tag = one_dataset.builder_name # Those are not real datasets from the Hub so we exclude them. 
if default_tag not in ["csv", "json", "pandas", "parquet", "text"]: if dataset_metadata is None: dataset_metadata = [{"config": one_dataset.config_name, "split": str(one_dataset.split)}] if dataset_tags is None: dataset_tags = [default_tag] if dataset_args is None: dataset_args = [one_dataset.config_name] if dataset is None and dataset_tags is not None: dataset = dataset_tags # Infer default finetuned_from if ( finetuned_from is None and hasattr(trainer.model.config, "_name_or_path") and not os.path.isdir(trainer.model.config._name_or_path) ): finetuned_from = trainer.model.config._name_or_path # Infer default task tag: if tasks is None: model_class_name = trainer.model.__class__.__name__ for task, mapping in TASK_MAPPING.items(): if model_class_name in _get_mapping_values(mapping): tasks = task if model_name is None: model_name = Path(trainer.args.output_dir).name if len(model_name) == 0: model_name = finetuned_from # Add `generated_from_trainer` to the tags if tags is None: tags = ["generated_from_trainer"] elif isinstance(tags, str) and tags != "generated_from_trainer": tags = [tags, "generated_from_trainer"] elif "generated_from_trainer" not in tags: tags.append("generated_from_trainer") _, eval_lines, eval_results = parse_log_history(trainer.state.log_history) hyperparameters = extract_hyperparameters_from_trainer(trainer) return cls( language=language, license=license, tags=tags, model_name=model_name, finetuned_from=finetuned_from, tasks=tasks, dataset=dataset, dataset_tags=dataset_tags, dataset_args=dataset_args, dataset_metadata=dataset_metadata, eval_results=eval_results, eval_lines=eval_lines, hyperparameters=hyperparameters, ) @classmethod def from_keras( cls, model, model_name, keras_history=None, language=None, license=None, tags=None, finetuned_from=None, tasks=None, dataset_tags=None, dataset=None, dataset_args=None, ): # Infer default from dataset if dataset is not None: if is_hf_dataset(dataset) and (dataset_tags is None or dataset_args is None): default_tag = dataset.builder_name # Those are not real datasets from the Hub so we exclude them. 
if default_tag not in ["csv", "json", "pandas", "parquet", "text"]: if dataset_tags is None: dataset_tags = [default_tag] if dataset_args is None: dataset_args = [dataset.config_name] if dataset is None and dataset_tags is not None: dataset = dataset_tags # Infer default finetuned_from if ( finetuned_from is None and hasattr(model.config, "_name_or_path") and not os.path.isdir(model.config._name_or_path) ): finetuned_from = model.config._name_or_path # Infer default task tag: if tasks is None: model_class_name = model.__class__.__name__ for task, mapping in TASK_MAPPING.items(): if model_class_name in _get_mapping_values(mapping): tasks = task # Add `generated_from_keras_callback` to the tags if tags is None: tags = ["generated_from_keras_callback"] elif isinstance(tags, str) and tags != "generated_from_keras_callback": tags = [tags, "generated_from_keras_callback"] elif "generated_from_keras_callback" not in tags: tags.append("generated_from_keras_callback") if keras_history is not None: _, eval_lines, eval_results = parse_keras_history(keras_history) else: eval_lines = [] eval_results = {} hyperparameters = extract_hyperparameters_from_keras(model) return cls( language=language, license=license, tags=tags, model_name=model_name, finetuned_from=finetuned_from, tasks=tasks, dataset_tags=dataset_tags, dataset=dataset, dataset_args=dataset_args, eval_results=eval_results, eval_lines=eval_lines, hyperparameters=hyperparameters, source="keras", ) def parse_keras_history(logs): """ Parse the `logs` of either a `tf.keras.History` object returned by `model.fit()` or an accumulated logs `dict` passed to the `PushToHubCallback`. Returns lines and logs compatible with those returned by `parse_log_history`. """ if hasattr(logs, "history"): # This looks like a `History` object if not hasattr(logs, "epoch"): # This history looks empty, return empty results return None, [], {} logs.history["epoch"] = logs.epoch logs = logs.history else: # Training logs is a list of dicts, let's invert it to a dict of lists to match a History object logs = {log_key: [single_dict[log_key] for single_dict in logs] for log_key in logs[0]} lines = [] for i in range(len(logs["epoch"])): epoch_dict = {log_key: log_value_list[i] for log_key, log_value_list in logs.items()} values = {} for k, v in epoch_dict.items(): if k.startswith("val_"): k = "validation_" + k[4:] elif k != "epoch": k = "train_" + k splits = k.split("_") name = " ".join([part.capitalize() for part in splits]) values[name] = v lines.append(values) eval_results = lines[-1] return logs, lines, eval_results def parse_log_history(log_history): """ Parse the `log_history` of a Trainer to get the intermediate and final evaluation results. 
""" idx = 0 while idx < len(log_history) and "train_runtime" not in log_history[idx]: idx += 1 # If there are no training logs if idx == len(log_history): idx -= 1 while idx >= 0 and "eval_loss" not in log_history[idx]: idx -= 1 if idx >= 0: return None, None, log_history[idx] else: return None, None, None # From now one we can assume we have training logs: train_log = log_history[idx] lines = [] training_loss = "No log" for i in range(idx): if "loss" in log_history[i]: training_loss = log_history[i]["loss"] if "eval_loss" in log_history[i]: metrics = log_history[i].copy() _ = metrics.pop("total_flos", None) epoch = metrics.pop("epoch", None) step = metrics.pop("step", None) _ = metrics.pop("eval_runtime", None) _ = metrics.pop("eval_samples_per_second", None) _ = metrics.pop("eval_steps_per_second", None) _ = metrics.pop("eval_jit_compilation_time", None) values = {"Training Loss": training_loss, "Epoch": epoch, "Step": step} for k, v in metrics.items(): if k == "eval_loss": values["Validation Loss"] = v else: splits = k.split("_") name = " ".join([part.capitalize() for part in splits[1:]]) values[name] = v lines.append(values) idx = len(log_history) - 1 while idx >= 0 and "eval_loss" not in log_history[idx]: idx -= 1 if idx > 0: eval_results = {} for key, value in log_history[idx].items(): if key.startswith("eval_"): key = key[5:] if key not in ["runtime", "samples_per_second", "steps_per_second", "epoch", "step"]: camel_cased_key = " ".join([part.capitalize() for part in key.split("_")]) eval_results[camel_cased_key] = value return train_log, lines, eval_results else: return train_log, lines, None def extract_hyperparameters_from_keras(model): import tensorflow as tf hyperparameters = {} if hasattr(model, "optimizer") and model.optimizer is not None: hyperparameters["optimizer"] = model.optimizer.get_config() else: hyperparameters["optimizer"] = None hyperparameters["training_precision"] = tf.keras.mixed_precision.global_policy().name return hyperparameters def _maybe_round(v, decimals=4): if isinstance(v, float) and len(str(v).split(".")) > 1 and len(str(v).split(".")[1]) > decimals: return f"{v:.{decimals}f}" return str(v) def _regular_table_line(values, col_widths): values_with_space = [f"| {v}" + " " * (w - len(v) + 1) for v, w in zip(values, col_widths)] return "".join(values_with_space) + "|\n" def _second_table_line(col_widths): values = ["|:" + "-" * w + ":" for w in col_widths] return "".join(values) + "|\n" def make_markdown_table(lines): """ Create a nice Markdown table from the results in `lines`. 
""" if lines is None or len(lines) == 0: return "" col_widths = {key: len(str(key)) for key in lines[0].keys()} for line in lines: for key, value in line.items(): if col_widths[key] < len(_maybe_round(value)): col_widths[key] = len(_maybe_round(value)) table = _regular_table_line(list(lines[0].keys()), list(col_widths.values())) table += _second_table_line(list(col_widths.values())) for line in lines: table += _regular_table_line([_maybe_round(v) for v in line.values()], list(col_widths.values())) return table _TRAINING_ARGS_KEYS = [ "learning_rate", "train_batch_size", "eval_batch_size", "seed", ] def extract_hyperparameters_from_trainer(trainer): hyperparameters = {k: getattr(trainer.args, k) for k in _TRAINING_ARGS_KEYS} if trainer.args.parallel_mode not in [ParallelMode.NOT_PARALLEL, ParallelMode.NOT_DISTRIBUTED]: hyperparameters["distributed_type"] = ( "multi-GPU" if trainer.args.parallel_mode == ParallelMode.DISTRIBUTED else trainer.args.parallel_mode.value ) if trainer.args.world_size > 1: hyperparameters["num_devices"] = trainer.args.world_size if trainer.args.gradient_accumulation_steps > 1: hyperparameters["gradient_accumulation_steps"] = trainer.args.gradient_accumulation_steps total_train_batch_size = ( trainer.args.train_batch_size * trainer.args.world_size * trainer.args.gradient_accumulation_steps ) if total_train_batch_size != hyperparameters["train_batch_size"]: hyperparameters["total_train_batch_size"] = total_train_batch_size total_eval_batch_size = trainer.args.eval_batch_size * trainer.args.world_size if total_eval_batch_size != hyperparameters["eval_batch_size"]: hyperparameters["total_eval_batch_size"] = total_eval_batch_size if trainer.args.adafactor: hyperparameters["optimizer"] = "Adafactor" else: hyperparameters["optimizer"] = ( f"Adam with betas=({trainer.args.adam_beta1},{trainer.args.adam_beta2}) and" f" epsilon={trainer.args.adam_epsilon}" ) hyperparameters["lr_scheduler_type"] = trainer.args.lr_scheduler_type.value if trainer.args.warmup_ratio != 0.0: hyperparameters["lr_scheduler_warmup_ratio"] = trainer.args.warmup_ratio if trainer.args.warmup_steps != 0.0: hyperparameters["lr_scheduler_warmup_steps"] = trainer.args.warmup_steps if trainer.args.max_steps != -1: hyperparameters["training_steps"] = trainer.args.max_steps else: hyperparameters["num_epochs"] = trainer.args.num_train_epochs if trainer.args.fp16: if trainer.use_cuda_amp: hyperparameters["mixed_precision_training"] = "Native AMP" elif trainer.use_apex: hyperparameters["mixed_precision_training"] = f"Apex, opt level {trainer.args.fp16_opt_level}" if trainer.args.label_smoothing_factor != 0.0: hyperparameters["label_smoothing_factor"] = trainer.args.label_smoothing_factor return hyperparameters
-1
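As a quick aside, the log-parsing and Markdown-table helpers in the `modelcard.py` dump above are easiest to follow with a small usage sketch. This is illustrative only: it assumes those functions are importable from `transformers.modelcard` and uses a made-up Trainer-style `log_history`.

```python
from transformers.modelcard import make_markdown_table, parse_log_history

# Toy log history (hypothetical values): two train/eval log pairs plus the final train_runtime entry.
log_history = [
    {"loss": 1.25, "epoch": 1.0, "step": 100},
    {"eval_loss": 1.10, "eval_accuracy": 0.61, "epoch": 1.0, "step": 100},
    {"loss": 0.98, "epoch": 2.0, "step": 200},
    {"eval_loss": 0.92, "eval_accuracy": 0.68, "epoch": 2.0, "step": 200},
    {"train_runtime": 123.4, "epoch": 2.0, "step": 200},
]

train_log, lines, eval_results = parse_log_history(log_history)
print(eval_results)                # {'Loss': 0.92, 'Accuracy': 0.68}
print(make_markdown_table(lines))  # Markdown table with Training Loss / Epoch / Step / Validation Loss / Accuracy columns
```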
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors with `//` is deprecated and has to be done via `torch.div` instead.
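For context, here is a minimal, hypothetical sketch of the substitution the PR describes; the tensor values and variable names are illustrative and not taken from the actual diff:

```python
import torch

positions = torch.tensor([5, 17, 42])  # illustrative tensor, not from the PR
block_size = 4

# Deprecated pattern that triggers the __floordiv__ warning on affected PyTorch versions:
# block_ids = positions // block_size

# Replacement pattern: explicit floor division via torch.div
block_ids = torch.div(positions, block_size, rounding_mode="floor")
print(block_ids)  # tensor([ 1,  4, 10])
```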
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors with `//` is deprecated and has to be done via `torch.div` instead.
./examples/legacy/question-answering/run_squad.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for question-answering on SQuAD (DistilBERT, Bert, XLM, XLNet).""" import argparse import glob import logging import os import random import timeit import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange import transformers from transformers import ( MODEL_FOR_QUESTION_ANSWERING_MAPPING, WEIGHTS_NAME, AdamW, AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, get_linear_schedule_with_warmup, squad_convert_examples_to_features, ) from transformers.data.metrics.squad_metrics import ( compute_predictions_log_probs, compute_predictions_logits, squad_evaluate, ) from transformers.data.processors.squad import SquadResult, SquadV1Processor, SquadV2Processor from transformers.trainer_utils import is_main_process try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter logger = logging.getLogger(__name__) MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def to_list(tensor): return tensor.detach().cpu().tolist() def train(args, train_dataset, model, tokenizer): """Train the model""" if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) # Check if saved optimizer or scheduler states exist if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile( os.path.join(args.model_name_or_path, 
"scheduler.pt") ): # Load in optimizer and scheduler states optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))) scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True ) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 1 epochs_trained = 0 steps_trained_in_current_epoch = 0 # Check if continuing training from a checkpoint if os.path.exists(args.model_name_or_path): try: # set global_step to gobal_step of last saved checkpoint from model path checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0] global_step = int(checkpoint_suffix) epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps) steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps) logger.info(" Continuing training from checkpoint, will skip to saved global_step") logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", global_step) logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) except ValueError: logger.info(" Starting fine-tuning.") tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange( epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0] ) # Added here for reproductibility set_seed(args) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue model.train() batch = tuple(t.to(args.device) for t in batch) inputs = { "input_ids": batch[0], "attention_mask": batch[1], "token_type_ids": batch[2], "start_positions": batch[3], "end_positions": batch[4], } if args.model_type in ["xlm", "roberta", "distilbert", "camembert", "bart", "longformer"]: del inputs["token_type_ids"] if args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": batch[5], "p_mask": batch[6]}) if args.version_2_with_negative: inputs.update({"is_impossible": batch[7]}) if hasattr(model, "config") and hasattr(model.config, "lang2id"): inputs.update( {"langs": (torch.ones(batch[0].shape, 
dtype=torch.int64) * args.lang_id).to(args.device)} ) outputs = model(**inputs) # model outputs are always tuple in transformers (see doc) loss = outputs[0] if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 # Log metrics if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Only evaluate when single GPU otherwise metrics may not average well if args.local_rank == -1 and args.evaluate_during_training: results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar("eval_{}".format(key), value, global_step) tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step) tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step) logging_loss = tr_loss # Save model checkpoint if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) # Take care of distributed/parallel training model_to_save = model.module if hasattr(model, "module") else model model_to_save.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu evaluate if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): model = torch.nn.DataParallel(model) # Eval! 
logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) all_results = [] start_time = timeit.default_timer() for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = { "input_ids": batch[0], "attention_mask": batch[1], "token_type_ids": batch[2], } if args.model_type in ["xlm", "roberta", "distilbert", "camembert", "bart", "longformer"]: del inputs["token_type_ids"] feature_indices = batch[3] # XLNet and XLM use more arguments for their predictions if args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": batch[4], "p_mask": batch[5]}) # for lang_id-sensitive xlm models if hasattr(model, "config") and hasattr(model.config, "lang2id"): inputs.update( {"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)} ) outputs = model(**inputs) for i, feature_index in enumerate(feature_indices): eval_feature = features[feature_index.item()] unique_id = int(eval_feature.unique_id) output = [to_list(output[i]) for output in outputs.to_tuple()] # Some models (XLNet, XLM) use 5 arguments for their predictions, while the other "simpler" # models only use two. if len(output) >= 5: start_logits = output[0] start_top_index = output[1] end_logits = output[2] end_top_index = output[3] cls_logits = output[4] result = SquadResult( unique_id, start_logits, end_logits, start_top_index=start_top_index, end_top_index=end_top_index, cls_logits=cls_logits, ) else: start_logits, end_logits = output result = SquadResult(unique_id, start_logits, end_logits) all_results.append(result) evalTime = timeit.default_timer() - start_time logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset)) # Compute predictions output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix)) output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix)) if args.version_2_with_negative: output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix)) else: output_null_log_odds_file = None # XLNet and XLM use a more complex post-processing procedure if args.model_type in ["xlnet", "xlm"]: start_n_top = model.config.start_n_top if hasattr(model, "config") else model.module.config.start_n_top end_n_top = model.config.end_n_top if hasattr(model, "config") else model.module.config.end_n_top predictions = compute_predictions_log_probs( examples, features, all_results, args.n_best_size, args.max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, start_n_top, end_n_top, args.version_2_with_negative, tokenizer, args.verbose_logging, ) else: predictions = compute_predictions_logits( examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.verbose_logging, args.version_2_with_negative, args.null_score_diff_threshold, tokenizer, ) # Compute the F1 and exact scores. 
results = squad_evaluate(examples, predictions) return results def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): if args.local_rank not in [-1, 0] and not evaluate: # Make sure only the first process in distributed training process the dataset, and the others will use the cache torch.distributed.barrier() # Load data features from cache or dataset file input_dir = args.data_dir if args.data_dir else "." cached_features_file = os.path.join( input_dir, "cached_{}_{}_{}".format( "dev" if evaluate else "train", list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length), ), ) # Init features and dataset from cache if it exists if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features_and_dataset = torch.load(cached_features_file) features, dataset, examples = ( features_and_dataset["features"], features_and_dataset["dataset"], features_and_dataset["examples"], ) else: logger.info("Creating features from dataset file at %s", input_dir) if not args.data_dir and ((evaluate and not args.predict_file) or (not evaluate and not args.train_file)): try: import tensorflow_datasets as tfds except ImportError: raise ImportError("If not data_dir is specified, tensorflow_datasets needs to be installed.") if args.version_2_with_negative: logger.warning("tensorflow_datasets does not handle version 2 of SQuAD.") tfds_examples = tfds.load("squad") examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate) else: processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor() if evaluate: examples = processor.get_dev_examples(args.data_dir, filename=args.predict_file) else: examples = processor.get_train_examples(args.data_dir, filename=args.train_file) features, dataset = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=not evaluate, return_dataset="pt", threads=args.threads, ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save({"features": features, "dataset": dataset, "examples": examples}, cached_features_file) if args.local_rank == 0 and not evaluate: # Make sure only the first process in distributed training process the dataset, and the others will use the cache torch.distributed.barrier() if output_examples: return dataset, examples, features return dataset def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_TYPES), ) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.", ) # Other parameters parser.add_argument( "--data_dir", default=None, type=str, help="The input data dir. Should contain the .json files for the task." + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--train_file", default=None, type=str, help="The input training file. 
If a data dir is specified, will look for the file there" + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--predict_file", default=None, type=str, help="The input evaluation file. If a data dir is specified, will look for the file there" + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co", ) parser.add_argument( "--version_2_with_negative", action="store_true", help="If true, the SQuAD examples contain some that do not have an answer.", ) parser.add_argument( "--null_score_diff_threshold", type=float, default=0.0, help="If null_score - best_non_null is greater than the threshold predict null.", ) parser.add_argument( "--max_seq_length", default=384, type=int, help=( "The maximum total input sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded." ), ) parser.add_argument( "--doc_stride", default=128, type=int, help="When splitting up a long document into chunks, how much stride to take between chunks.", ) parser.add_argument( "--max_query_length", default=64, type=int, help=( "The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length." ), ) parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") parser.add_argument( "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step." ) parser.add_argument( "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model." ) parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument( "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation." ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument( "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform." ) parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.", ) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument( "--n_best_size", default=20, type=int, help="The total number of n-best predictions to generate in the nbest_predictions.json output file.", ) parser.add_argument( "--max_answer_length", default=30, type=int, help=( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ), ) parser.add_argument( "--verbose_logging", action="store_true", help=( "If true, all of the warnings related to data processing will be printed. " "A number of warnings are expected for a normal SQuAD evaluation." ), ) parser.add_argument( "--lang_id", default=0, type=int, help=( "language id of input for language-specific xlm models (see" " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)" ), ) parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.") parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.") parser.add_argument( "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available") parser.add_argument( "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory" ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html" ), ) parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.") parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.") parser.add_argument("--threads", type=int, default=1, help="multiple threads for converting example to features") args = parser.parse_args() if args.doc_stride >= args.max_seq_length - args.max_query_length: logger.warning( "WARNING - You've set a doc stride which may be superior to the document length in some " "examples. This could result in errors when building features from the examples. Please reduce the doc " "stride or increase the maximum length to ensure the features are correctly built." ) if ( os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir ): raise ValueError( "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( args.output_dir ) ) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: # Make sure only the first process in distributed training will download model & vocab torch.distributed.barrier() args.model_type = args.model_type.lower() config = AutoConfig.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None, ) tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None, use_fast=False, # SquadDataset is not compatible with Fast tokenizers which have a smarter overflow handeling ) model = AutoModelForQuestionAnswering.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None, ) if args.local_rank == 0: # Make sure only the first process in distributed training will download model & vocab torch.distributed.barrier() model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set. # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will # remove the need for this code, but it is still valid. 
if args.fp16: try: import apex apex.amp.register_half_function(torch, "einsum") except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") # Training if args.do_train: train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Save the trained model and the tokenizer if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` # Take care of distributed/parallel training model_to_save = model.module if hasattr(model, "module") else model model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Load a trained model and vocabulary that you have fine-tuned model = AutoModelForQuestionAnswering.from_pretrained(args.output_dir) # , force_download=True) # SquadDataset is not compatible with Fast tokenizers which have a smarter overflow handeling # So we use use_fast=False here for now until Fast-tokenizer-compatible-examples are out tokenizer = AutoTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case, use_fast=False) model.to(args.device) # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory results = {} if args.do_eval and args.local_rank in [-1, 0]: if args.do_train: logger.info("Loading checkpoints saved during training for evaluation") checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = [ os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) ] else: logger.info("Loading checkpoint %s for evaluation", args.model_name_or_path) checkpoints = [args.model_name_or_path] logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: # Reload the model global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" model = AutoModelForQuestionAnswering.from_pretrained(checkpoint) # , force_download=True) model.to(args.device) # Evaluate result = evaluate(args, model, tokenizer, prefix=global_step) result = {k + ("_{}".format(global_step) if global_step else ""): v for k, v in result.items()} results.update(result) logger.info("Results: {}".format(results)) return results if __name__ == "__main__": main()
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Finetuning the library models for question-answering on SQuAD (DistilBERT, Bert, XLM, XLNet).""" import argparse import glob import logging import os import random import timeit import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange import transformers from transformers import ( MODEL_FOR_QUESTION_ANSWERING_MAPPING, WEIGHTS_NAME, AdamW, AutoConfig, AutoModelForQuestionAnswering, AutoTokenizer, get_linear_schedule_with_warmup, squad_convert_examples_to_features, ) from transformers.data.metrics.squad_metrics import ( compute_predictions_log_probs, compute_predictions_logits, squad_evaluate, ) from transformers.data.processors.squad import SquadResult, SquadV1Processor, SquadV2Processor from transformers.trainer_utils import is_main_process try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter logger = logging.getLogger(__name__) MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def to_list(tensor): return tensor.detach().cpu().tolist() def train(args, train_dataset, model, tokenizer): """Train the model""" if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": args.weight_decay, }, {"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0}, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) # Check if saved optimizer or scheduler states exist if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile( os.path.join(args.model_name_or_path, 
"scheduler.pt") ): # Load in optimizer and scheduler states optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))) scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True ) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 1 epochs_trained = 0 steps_trained_in_current_epoch = 0 # Check if continuing training from a checkpoint if os.path.exists(args.model_name_or_path): try: # set global_step to gobal_step of last saved checkpoint from model path checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0] global_step = int(checkpoint_suffix) epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps) steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps) logger.info(" Continuing training from checkpoint, will skip to saved global_step") logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", global_step) logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) except ValueError: logger.info(" Starting fine-tuning.") tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange( epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0] ) # Added here for reproductibility set_seed(args) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue model.train() batch = tuple(t.to(args.device) for t in batch) inputs = { "input_ids": batch[0], "attention_mask": batch[1], "token_type_ids": batch[2], "start_positions": batch[3], "end_positions": batch[4], } if args.model_type in ["xlm", "roberta", "distilbert", "camembert", "bart", "longformer"]: del inputs["token_type_ids"] if args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": batch[5], "p_mask": batch[6]}) if args.version_2_with_negative: inputs.update({"is_impossible": batch[7]}) if hasattr(model, "config") and hasattr(model.config, "lang2id"): inputs.update( {"langs": (torch.ones(batch[0].shape, 
dtype=torch.int64) * args.lang_id).to(args.device)} ) outputs = model(**inputs) # model outputs are always tuple in transformers (see doc) loss = outputs[0] if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 # Log metrics if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Only evaluate when single GPU otherwise metrics may not average well if args.local_rank == -1 and args.evaluate_during_training: results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar("eval_{}".format(key), value, global_step) tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step) tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step) logging_loss = tr_loss # Save model checkpoint if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) # Take care of distributed/parallel training model_to_save = model.module if hasattr(model, "module") else model model_to_save.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu evaluate if args.n_gpu > 1 and not isinstance(model, torch.nn.DataParallel): model = torch.nn.DataParallel(model) # Eval! 
logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) all_results = [] start_time = timeit.default_timer() for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = { "input_ids": batch[0], "attention_mask": batch[1], "token_type_ids": batch[2], } if args.model_type in ["xlm", "roberta", "distilbert", "camembert", "bart", "longformer"]: del inputs["token_type_ids"] feature_indices = batch[3] # XLNet and XLM use more arguments for their predictions if args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": batch[4], "p_mask": batch[5]}) # for lang_id-sensitive xlm models if hasattr(model, "config") and hasattr(model.config, "lang2id"): inputs.update( {"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)} ) outputs = model(**inputs) for i, feature_index in enumerate(feature_indices): eval_feature = features[feature_index.item()] unique_id = int(eval_feature.unique_id) output = [to_list(output[i]) for output in outputs.to_tuple()] # Some models (XLNet, XLM) use 5 arguments for their predictions, while the other "simpler" # models only use two. if len(output) >= 5: start_logits = output[0] start_top_index = output[1] end_logits = output[2] end_top_index = output[3] cls_logits = output[4] result = SquadResult( unique_id, start_logits, end_logits, start_top_index=start_top_index, end_top_index=end_top_index, cls_logits=cls_logits, ) else: start_logits, end_logits = output result = SquadResult(unique_id, start_logits, end_logits) all_results.append(result) evalTime = timeit.default_timer() - start_time logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset)) # Compute predictions output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix)) output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix)) if args.version_2_with_negative: output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix)) else: output_null_log_odds_file = None # XLNet and XLM use a more complex post-processing procedure if args.model_type in ["xlnet", "xlm"]: start_n_top = model.config.start_n_top if hasattr(model, "config") else model.module.config.start_n_top end_n_top = model.config.end_n_top if hasattr(model, "config") else model.module.config.end_n_top predictions = compute_predictions_log_probs( examples, features, all_results, args.n_best_size, args.max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, start_n_top, end_n_top, args.version_2_with_negative, tokenizer, args.verbose_logging, ) else: predictions = compute_predictions_logits( examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.verbose_logging, args.version_2_with_negative, args.null_score_diff_threshold, tokenizer, ) # Compute the F1 and exact scores. 
results = squad_evaluate(examples, predictions) return results def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): if args.local_rank not in [-1, 0] and not evaluate: # Make sure only the first process in distributed training process the dataset, and the others will use the cache torch.distributed.barrier() # Load data features from cache or dataset file input_dir = args.data_dir if args.data_dir else "." cached_features_file = os.path.join( input_dir, "cached_{}_{}_{}".format( "dev" if evaluate else "train", list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length), ), ) # Init features and dataset from cache if it exists if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features_and_dataset = torch.load(cached_features_file) features, dataset, examples = ( features_and_dataset["features"], features_and_dataset["dataset"], features_and_dataset["examples"], ) else: logger.info("Creating features from dataset file at %s", input_dir) if not args.data_dir and ((evaluate and not args.predict_file) or (not evaluate and not args.train_file)): try: import tensorflow_datasets as tfds except ImportError: raise ImportError("If not data_dir is specified, tensorflow_datasets needs to be installed.") if args.version_2_with_negative: logger.warning("tensorflow_datasets does not handle version 2 of SQuAD.") tfds_examples = tfds.load("squad") examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate) else: processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor() if evaluate: examples = processor.get_dev_examples(args.data_dir, filename=args.predict_file) else: examples = processor.get_train_examples(args.data_dir, filename=args.train_file) features, dataset = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=not evaluate, return_dataset="pt", threads=args.threads, ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save({"features": features, "dataset": dataset, "examples": examples}, cached_features_file) if args.local_rank == 0 and not evaluate: # Make sure only the first process in distributed training process the dataset, and the others will use the cache torch.distributed.barrier() if output_examples: return dataset, examples, features return dataset def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_TYPES), ) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.", ) # Other parameters parser.add_argument( "--data_dir", default=None, type=str, help="The input data dir. Should contain the .json files for the task." + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--train_file", default=None, type=str, help="The input training file. 
If a data dir is specified, will look for the file there" + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--predict_file", default=None, type=str, help="The input evaluation file. If a data dir is specified, will look for the file there" + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co", ) parser.add_argument( "--version_2_with_negative", action="store_true", help="If true, the SQuAD examples contain some that do not have an answer.", ) parser.add_argument( "--null_score_diff_threshold", type=float, default=0.0, help="If null_score - best_non_null is greater than the threshold predict null.", ) parser.add_argument( "--max_seq_length", default=384, type=int, help=( "The maximum total input sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded." ), ) parser.add_argument( "--doc_stride", default=128, type=int, help="When splitting up a long document into chunks, how much stride to take between chunks.", ) parser.add_argument( "--max_query_length", default=64, type=int, help=( "The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length." ), ) parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") parser.add_argument( "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step." ) parser.add_argument( "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model." ) parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument( "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation." ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument( "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform." ) parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.", ) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument( "--n_best_size", default=20, type=int, help="The total number of n-best predictions to generate in the nbest_predictions.json output file.", ) parser.add_argument( "--max_answer_length", default=30, type=int, help=( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ), ) parser.add_argument( "--verbose_logging", action="store_true", help=( "If true, all of the warnings related to data processing will be printed. " "A number of warnings are expected for a normal SQuAD evaluation." ), ) parser.add_argument( "--lang_id", default=0, type=int, help=( "language id of input for language-specific xlm models (see" " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)" ), ) parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.") parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.") parser.add_argument( "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available") parser.add_argument( "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory" ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html" ), ) parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.") parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.") parser.add_argument("--threads", type=int, default=1, help="multiple threads for converting example to features") args = parser.parse_args() if args.doc_stride >= args.max_seq_length - args.max_query_length: logger.warning( "WARNING - You've set a doc stride which may be superior to the document length in some " "examples. This could result in errors when building features from the examples. Please reduce the doc " "stride or increase the maximum length to ensure the features are correctly built." ) if ( os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir ): raise ValueError( "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( args.output_dir ) ) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: # Make sure only the first process in distributed training will download model & vocab torch.distributed.barrier() args.model_type = args.model_type.lower() config = AutoConfig.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None, ) tokenizer = AutoTokenizer.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None, use_fast=False, # SquadDataset is not compatible with Fast tokenizers which have a smarter overflow handeling ) model = AutoModelForQuestionAnswering.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None, ) if args.local_rank == 0: # Make sure only the first process in distributed training will download model & vocab torch.distributed.barrier() model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set. # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will # remove the need for this code, but it is still valid. 
if args.fp16: try: import apex apex.amp.register_half_function(torch, "einsum") except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") # Training if args.do_train: train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Save the trained model and the tokenizer if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` # Take care of distributed/parallel training model_to_save = model.module if hasattr(model, "module") else model model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Load a trained model and vocabulary that you have fine-tuned model = AutoModelForQuestionAnswering.from_pretrained(args.output_dir) # , force_download=True) # SquadDataset is not compatible with Fast tokenizers which have a smarter overflow handeling # So we use use_fast=False here for now until Fast-tokenizer-compatible-examples are out tokenizer = AutoTokenizer.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case, use_fast=False) model.to(args.device) # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory results = {} if args.do_eval and args.local_rank in [-1, 0]: if args.do_train: logger.info("Loading checkpoints saved during training for evaluation") checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = [ os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) ] else: logger.info("Loading checkpoint %s for evaluation", args.model_name_or_path) checkpoints = [args.model_name_or_path] logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: # Reload the model global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" model = AutoModelForQuestionAnswering.from_pretrained(checkpoint) # , force_download=True) model.to(args.device) # Evaluate result = evaluate(args, model, tokenizer, prefix=global_step) result = {k + ("_{}".format(global_step) if global_step else ""): v for k, v in result.items()} results.update(result) logger.info("Results: {}".format(results)) return results if __name__ == "__main__": main()
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
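A minimal sketch of the `//` → `torch.div` replacement described in the PR description above. The variable names and values here are illustrative assumptions for the example, not code taken from the actual diff:

```python
import torch

# Illustrative values only (hypothetical, not from the PR).
positions = torch.arange(12)
block_size = 4

# Deprecated pattern that triggers the "__floordiv__ is deprecated" warning on tensors:
#   block_ids = positions // block_size

# Replacement suggested by the PR description: explicit floor-rounding division.
block_ids = torch.div(positions, block_size, rounding_mode="floor")
print(block_ids)  # tensor([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
```

The result is identical to integer floor division, but `torch.div(..., rounding_mode="floor")` makes the rounding behavior explicit and avoids the deprecation warning.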
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/chinese_clip/modeling_chinese_clip.py
# coding=utf-8 # Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Chinese-CLIP model.""" import math from dataclasses import dataclass from typing import Any, List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_chinese_clip import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "OFA-Sys/chinese-clip-vit-base-patch16" _CONFIG_FOR_DOC = "ChineseCLIPConfig" CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = [ "OFA-Sys/chinese-clip-vit-base-patch16", # See all Chinese-CLIP models at https://huggingface.co/models?filter=chinese_clip ] # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html # Copied from transformers.models.clip.modeling_clip.contrastive_loss def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) def chinese_clip_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(similarity.t()) return (caption_loss + image_loss) / 2.0 @dataclass class ChineseCLIPOutput(ModelOutput): """ Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for image-text similarity. logits_per_image:(`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text similarity scores. logits_per_text:(`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image similarity scores. text_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`ChineseCLIPTextModel`]. image_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`ChineseCLIPVisionModel`]. text_model_output(`BaseModelOutputWithPoolingAndCrossAttentions`): The output of the [`ChineseCLIPTextModel`]. vision_model_output(`BaseModelOutputWithPoolingAndCrossAttentions`): The output of the [`ChineseCLIPVisionModel`]. 
""" loss: Optional[torch.FloatTensor] = None logits_per_image: torch.FloatTensor = None logits_per_text: torch.FloatTensor = None text_embeds: torch.FloatTensor = None image_embeds: torch.FloatTensor = None text_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None vision_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None def to_tuple(self) -> Tuple[Any]: return tuple( self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() for k in self.keys() ) # Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert->ChineseCLIPText class ChineseCLIPTextEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False ) def forward( self, input_ids: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, past_key_values_length: int = 0, ) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings # Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->ChineseCLIP class ChineseCLIPVisionEmbeddings(nn.Module): def __init__(self, config: ChineseCLIPVisionConfig): super().__init__() self.config = config self.embed_dim = 
config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) self.patch_embedding = nn.Conv2d( in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False, ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1))) def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: batch_size = pixel_values.shape[0] patch_embeds = self.patch_embedding(pixel_values) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->ChineseCLIPText class ChineseCLIPTextSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. 
is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": query_length, key_length = query_layer.shape[2], key_layer.shape[2] if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( -1, 1 ) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in ChineseCLIPTextModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
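        # Positions excluded by `attention_mask` carry a large negative additive bias at this
        # point, so the softmax below assigns them near-zero probability.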
attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->ChineseCLIPText class ChineseCLIPTextSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->ChineseCLIPText class ChineseCLIPTextAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = ChineseCLIPTextSelfAttention(config, position_embedding_type=position_embedding_type) self.output = ChineseCLIPTextSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class ChineseCLIPVisionAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if 
self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." ) self.scale = self.head_dim**-0.5 self.dropout = config.attention_dropout self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, embed_dim = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scale key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit akward, but it's required to # make sure that attn_weights keeps its gradient. 
# In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->ChineseCLIPText class ChineseCLIPTextIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->ChineseCLIPText class ChineseCLIPTextOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->ChineseCLIPVision class ChineseCLIPVisionMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->ChineseCLIPText class ChineseCLIPTextLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = ChineseCLIPTextAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = ChineseCLIPTextAttention(config, position_embedding_type="absolute") self.intermediate = ChineseCLIPTextIntermediate(config) self.output = ChineseCLIPTextOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, 
head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class ChineseCLIPVisionLayer(nn.Module): def __init__(self, config: ChineseCLIPConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = ChineseCLIPVisionAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = ChineseCLIPVisionMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, output_attentions=output_attentions, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->ChineseCLIPText class ChineseCLIPTextPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class ChineseCLIPPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = ChineseCLIPConfig base_model_prefix = "chinese_clip" supports_gradient_checkpointing = True _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor if isinstance(module, ChineseCLIPVisionEmbeddings): factor = self.config.initializer_factor nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor) nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) elif isinstance(module, ChineseCLIPTextEmbeddings): nn.init.normal_(module.word_embeddings.weight, mean=0.0, std=self.config.initializer_range) nn.init.normal_(module.position_embeddings.weight, mean=0.0, std=self.config.initializer_range) nn.init.normal_(module.token_type_embeddings.weight, mean=0.0, std=self.config.initializer_range) for embedding in [module.word_embeddings, module.position_embeddings, module.token_type_embeddings]: if embedding.padding_idx is not None: embedding.weight.data[embedding.padding_idx].zero_() elif isinstance(module, ChineseCLIPVisionAttention): factor = self.config.initializer_factor in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor out_proj_std = (module.embed_dim**-0.5) * factor nn.init.normal_(module.q_proj.weight, std=in_proj_std) nn.init.normal_(module.k_proj.weight, std=in_proj_std) nn.init.normal_(module.v_proj.weight, std=in_proj_std) nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, ChineseCLIPVisionMLP): factor = self.config.initializer_factor in_proj_std = ( (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor ) fc_std = (2 * module.config.hidden_size) ** -0.5 * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) elif isinstance(module, ChineseCLIPModel): nn.init.normal_( module.text_projection.weight, std=module.text_embed_dim**-0.5 * self.config.initializer_factor, ) nn.init.normal_( module.visual_projection.weight, std=module.vision_embed_dim**-0.5 * self.config.initializer_factor, ) if isinstance(module, nn.LayerNorm): 
module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, ChineseCLIPVisionEncoder) or isinstance(module, ChineseCLIPTextEncoder): module.gradient_checkpointing = value CHINESE_CLIP_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ChineseCLIPConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ CHINESE_CLIP_TEXT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ CHINESE_CLIP_VISION_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. 
Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ChineseCLIPImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ CHINESE_CLIP_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ChineseCLIPImageProcessor.__call__`] for details. return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->ChineseCLIPText class ChineseCLIPTextEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([ChineseCLIPTextLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) class ChineseCLIPVisionEncoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`ChineseCLIPVisionEncoderLayer`]. 
Args: config: ChineseCLIPConfig """ def __init__(self, config: ChineseCLIPConfig): super().__init__() self.config = config self.layers = nn.ModuleList([ChineseCLIPVisionLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, inputs_embeds, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(encoder_layer), hidden_states, ) else: layer_outputs = encoder_layer( hidden_states, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class ChineseCLIPVisionTransformer(nn.Module): def __init__(self, config: ChineseCLIPVisionConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = ChineseCLIPVisionEmbeddings(config) self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.encoder = ChineseCLIPVisionEncoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ChineseCLIPVisionConfig) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: """ output_attentions = output_attentions if output_attentions is not None else 
self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.embeddings(pixel_values) hidden_states = self.pre_layrnorm(hidden_states) encoder_outputs = self.encoder( inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings( "The text model from CHINESE_CLIP without any head or projection on top.", CHINESE_CLIP_START_DOCSTRING, ) class ChineseCLIPTextModel(ChineseCLIPPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in [Attention is all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. """ config_class = ChineseCLIPTextConfig def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = ChineseCLIPTextEmbeddings(config) self.encoder = ChineseCLIPTextEncoder(config) self.pooler = ChineseCLIPTextPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(CHINESE_CLIP_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return 
BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @add_start_docstrings( """The vision model from CHINESE_CLIP without any head or projection on top.""", CHINESE_CLIP_START_DOCSTRING, ) class ChineseCLIPVisionModel(ChineseCLIPPreTrainedModel): config_class = ChineseCLIPVisionConfig main_input_name = "pixel_values" def __init__(self, config: ChineseCLIPVisionConfig): super().__init__(config) self.vision_model = ChineseCLIPVisionTransformer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ChineseCLIPVisionConfig) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import CLIPProcessor, ChineseCLIPVisionModel >>> model = ChineseCLIPVisionModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> processor = CLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled CLS states ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict return self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) @add_start_docstrings(CHINESE_CLIP_START_DOCSTRING) class ChineseCLIPModel(ChineseCLIPPreTrainedModel): config_class = ChineseCLIPConfig def __init__(self, config: ChineseCLIPConfig): super().__init__(config) if not isinstance(config.text_config, ChineseCLIPTextConfig): raise ValueError( "config.text_config is expected to be of type ChineseCLIPTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, ChineseCLIPVisionConfig): raise ValueError( "config.vision_config is expected to be of type ChineseCLIPVisionConfig but is of type" f" {type(config.vision_config)}." 
) text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size self.text_model = ChineseCLIPTextModel(text_config, add_pooling_layer=False) self.vision_model = ChineseCLIPVisionTransformer(vision_config) self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) self.logit_scale = nn.Parameter(torch.ones([]) * self.config.logit_scale_init_value) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(CHINESE_CLIP_TEXT_INPUTS_DOCSTRING) def get_text_features( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the final [CLS] hidden state of Text-Transformer. Examples: ```python >>> from transformers import AutoTokenizer, ChineseCLIPModel >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> tokenizer = AutoTokenizer.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> inputs = tokenizer(["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], padding=True, return_tensors="pt") >>> text_features = model.get_text_features(**inputs) >>> text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True) ```""" # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = text_outputs[0][:, 0, :] text_features = self.text_projection(pooled_output) return text_features @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING) def get_image_features( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the final [CLS] hidden state of Vision-Transformer. 
Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, ChineseCLIPModel >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> image_features = model.get_image_features(**inputs) >>> image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True) ```""" # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = vision_outputs[1] # pooled_output image_features = self.visual_projection(pooled_output) return image_features @add_start_docstrings_to_model_forward(CHINESE_CLIP_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ChineseCLIPOutput, config_class=ChineseCLIPConfig) def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, return_loss: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, ChineseCLIPOutput]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, ChineseCLIPModel >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(text=["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], images=image, return_tensors="pt", padding=True) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ```""" # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components. 
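        # The rest of the forward pass runs both towers, projects their pooled outputs into the
        # shared embedding space, L2-normalizes them, and returns the scaled cosine similarities
        # as `logits_per_text` / `logits_per_image` (plus the contrastive loss when
        # `return_loss=True`).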
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[0][:, 0, :] text_embeds = self.text_projection(text_embeds) # normalized features image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) # cosine similarity as logits logit_scale = self.logit_scale.exp() logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale logits_per_image = logits_per_text.t() loss = None if return_loss: loss = chinese_clip_loss(logits_per_text) if not return_dict: # fix the None pooled_output of text_outputs to conform with dict_output pooled_output = text_outputs[1] if pooled_output is None: text_outputs = (text_outputs[0],) + text_outputs[2:] output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return ((loss,) + output) if loss is not None else output return ChineseCLIPOutput( loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs, )
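# A minimal, hypothetical usage sketch assembled from the doctest examples in the docstrings
# above; the checkpoint name, image URL, and Chinese labels are copied from those examples and
# are illustrative only, not a prescribed evaluation setup.
if __name__ == "__main__":
    import requests
    from PIL import Image

    from transformers import AutoProcessor, ChineseCLIPModel

    model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
    processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")

    url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
    image = Image.open(requests.get(url, stream=True).raw)
    texts = ["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"]

    inputs = processor(text=texts, images=image, return_tensors="pt", padding=True)
    outputs = model(**inputs)

    # logits_per_image has shape (num_images, num_texts); softmax over the text axis turns the
    # scaled cosine similarities into per-label probabilities.
    probs = outputs.logits_per_image.softmax(dim=1)
    print({text: round(prob.item(), 4) for text, prob in zip(texts, probs[0])})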
# coding=utf-8 # Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Chinese-CLIP model.""" import math from dataclasses import dataclass from typing import Any, List, Optional, Tuple, Union import torch import torch.utils.checkpoint from torch import nn from ...activations import ACT2FN from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_chinese_clip import ChineseCLIPConfig, ChineseCLIPTextConfig, ChineseCLIPVisionConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "OFA-Sys/chinese-clip-vit-base-patch16" _CONFIG_FOR_DOC = "ChineseCLIPConfig" CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST = [ "OFA-Sys/chinese-clip-vit-base-patch16", # See all Chinese-CLIP models at https://huggingface.co/models?filter=chinese_clip ] # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html # Copied from transformers.models.clip.modeling_clip.contrastive_loss def contrastive_loss(logits: torch.Tensor) -> torch.Tensor: return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device)) def chinese_clip_loss(similarity: torch.Tensor) -> torch.Tensor: caption_loss = contrastive_loss(similarity) image_loss = contrastive_loss(similarity.t()) return (caption_loss + image_loss) / 2.0 @dataclass class ChineseCLIPOutput(ModelOutput): """ Args: loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`): Contrastive loss for image-text similarity. logits_per_image:(`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`): The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text similarity scores. logits_per_text:(`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`): The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image similarity scores. text_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the pooled output of [`ChineseCLIPTextModel`]. image_embeds(`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the pooled output of [`ChineseCLIPVisionModel`]. text_model_output(`BaseModelOutputWithPoolingAndCrossAttentions`): The output of the [`ChineseCLIPTextModel`]. vision_model_output(`BaseModelOutputWithPoolingAndCrossAttentions`): The output of the [`ChineseCLIPVisionModel`]. 
""" loss: Optional[torch.FloatTensor] = None logits_per_image: torch.FloatTensor = None logits_per_text: torch.FloatTensor = None text_embeds: torch.FloatTensor = None image_embeds: torch.FloatTensor = None text_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None vision_model_output: BaseModelOutputWithPoolingAndCrossAttentions = None def to_tuple(self) -> Tuple[Any]: return tuple( self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple() for k in self.keys() ) # Copied from transformers.models.bert.modeling_bert.BertEmbeddings with Bert->ChineseCLIPText class ChineseCLIPTextEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.register_buffer( "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False ) def forward( self, input_ids: Optional[torch.LongTensor] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, past_key_values_length: int = 0, ) -> torch.Tensor: if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] # Setting the token_type_ids to the registered buffer in constructor where it is all zeros, which usually occurs # when its auto-generated, registered buffer helps users when tracing the model without passing token_type_ids, solves # issue #5664 if token_type_ids is None: if hasattr(self, "token_type_ids"): buffered_token_type_ids = self.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings # Copied from transformers.models.clip.modeling_clip.CLIPVisionEmbeddings with CLIP->ChineseCLIP class ChineseCLIPVisionEmbeddings(nn.Module): def __init__(self, config: ChineseCLIPVisionConfig): super().__init__() self.config = config self.embed_dim = 
config.hidden_size self.image_size = config.image_size self.patch_size = config.patch_size self.class_embedding = nn.Parameter(torch.randn(self.embed_dim)) self.patch_embedding = nn.Conv2d( in_channels=config.num_channels, out_channels=self.embed_dim, kernel_size=self.patch_size, stride=self.patch_size, bias=False, ) self.num_patches = (self.image_size // self.patch_size) ** 2 self.num_positions = self.num_patches + 1 self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim) self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1))) def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor: batch_size = pixel_values.shape[0] patch_embeds = self.patch_embedding(pixel_values) # shape = [*, width, grid, grid] patch_embeds = patch_embeds.flatten(2).transpose(1, 2) class_embeds = self.class_embedding.expand(batch_size, 1, -1) embeddings = torch.cat([class_embeds, patch_embeds], dim=1) embeddings = embeddings + self.position_embedding(self.position_ids) return embeddings # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->ChineseCLIPText class ChineseCLIPTextSelfAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention " f"heads ({config.num_attention_heads})" ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = position_embedding_type or getattr( config, "position_embedding_type", "absolute" ) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor: new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. 
is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) use_cache = past_key_value is not None if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": query_length, key_length = query_layer.shape[2], key_layer.shape[2] if use_cache: position_ids_l = torch.tensor(key_length - 1, dtype=torch.long, device=hidden_states.device).view( -1, 1 ) else: position_ids_l = torch.arange(query_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(key_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in ChineseCLIPTextModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
attention_probs = nn.functional.softmax(attention_scores, dim=-1) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput with Bert->ChineseCLIPText class ChineseCLIPTextSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->ChineseCLIPText class ChineseCLIPTextAttention(nn.Module): def __init__(self, config, position_embedding_type=None): super().__init__() self.self = ChineseCLIPTextSelfAttention(config, position_embedding_type=position_embedding_type) self.output = ChineseCLIPTextSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class ChineseCLIPVisionAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__(self, config): super().__init__() self.config = config self.embed_dim = config.hidden_size self.num_heads = config.num_attention_heads self.head_dim = self.embed_dim // self.num_heads if 
self.head_dim * self.num_heads != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" f" {self.num_heads})." ) self.scale = self.head_dim**-0.5 self.dropout = config.attention_dropout self.k_proj = nn.Linear(self.embed_dim, self.embed_dim) self.v_proj = nn.Linear(self.embed_dim, self.embed_dim) self.q_proj = nn.Linear(self.embed_dim, self.embed_dim) self.out_proj = nn.Linear(self.embed_dim, self.embed_dim) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" bsz, tgt_len, embed_dim = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scale key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len): raise ValueError( f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is" f" {attn_weights.size()}" ) attn_weights = nn.functional.softmax(attn_weights, dim=-1) if output_attentions: # this operation is a bit akward, but it's required to # make sure that attn_weights keeps its gradient. 
# In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim): raise ValueError( f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is" f" {attn_output.size()}" ) attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) attn_output = attn_output.transpose(1, 2) attn_output = attn_output.reshape(bsz, tgt_len, embed_dim) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped # Copied from transformers.models.bert.modeling_bert.BertIntermediate with Bert->ChineseCLIPText class ChineseCLIPTextIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->ChineseCLIPText class ChineseCLIPTextOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor: hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->ChineseCLIPVision class ChineseCLIPVisionMLP(nn.Module): def __init__(self, config): super().__init__() self.config = config self.activation_fn = ACT2FN[config.hidden_act] self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size) self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size) def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: hidden_states = self.fc1(hidden_states) hidden_states = self.activation_fn(hidden_states) hidden_states = self.fc2(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->ChineseCLIPText class ChineseCLIPTextLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = ChineseCLIPTextAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: if not self.is_decoder: raise ValueError(f"{self} should be used as a decoder model if cross attention is added") self.crossattention = ChineseCLIPTextAttention(config, position_embedding_type="absolute") self.intermediate = ChineseCLIPTextIntermediate(config) self.output = ChineseCLIPTextOutput(config) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, 
head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_value: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, output_attentions: Optional[bool] = False, ) -> Tuple[torch.Tensor]: # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers" " by setting `config.add_cross_attention=True`" ) # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class ChineseCLIPVisionLayer(nn.Module): def __init__(self, config: ChineseCLIPConfig): super().__init__() self.embed_dim = config.hidden_size self.self_attn = ChineseCLIPVisionAttention(config) self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) self.mlp = ChineseCLIPVisionMLP(config) self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps) def forward( self, hidden_states: torch.Tensor, output_attentions: Optional[bool] = False, ) -> Tuple[torch.FloatTensor]: """ Args: hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)` output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. 
""" residual = hidden_states hidden_states = self.layer_norm1(hidden_states) hidden_states, attn_weights = self.self_attn( hidden_states=hidden_states, output_attentions=output_attentions, ) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.layer_norm2(hidden_states) hidden_states = self.mlp(hidden_states) hidden_states = residual + hidden_states outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs # Copied from transformers.models.bert.modeling_bert.BertPooler with Bert->ChineseCLIPText class ChineseCLIPTextPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states: torch.Tensor) -> torch.Tensor: # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class ChineseCLIPPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = ChineseCLIPConfig base_model_prefix = "chinese_clip" supports_gradient_checkpointing = True _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """Initialize the weights""" factor = self.config.initializer_factor if isinstance(module, ChineseCLIPVisionEmbeddings): factor = self.config.initializer_factor nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor) nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor) nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor) elif isinstance(module, ChineseCLIPTextEmbeddings): nn.init.normal_(module.word_embeddings.weight, mean=0.0, std=self.config.initializer_range) nn.init.normal_(module.position_embeddings.weight, mean=0.0, std=self.config.initializer_range) nn.init.normal_(module.token_type_embeddings.weight, mean=0.0, std=self.config.initializer_range) for embedding in [module.word_embeddings, module.position_embeddings, module.token_type_embeddings]: if embedding.padding_idx is not None: embedding.weight.data[embedding.padding_idx].zero_() elif isinstance(module, ChineseCLIPVisionAttention): factor = self.config.initializer_factor in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor out_proj_std = (module.embed_dim**-0.5) * factor nn.init.normal_(module.q_proj.weight, std=in_proj_std) nn.init.normal_(module.k_proj.weight, std=in_proj_std) nn.init.normal_(module.v_proj.weight, std=in_proj_std) nn.init.normal_(module.out_proj.weight, std=out_proj_std) elif isinstance(module, ChineseCLIPVisionMLP): factor = self.config.initializer_factor in_proj_std = ( (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor ) fc_std = (2 * module.config.hidden_size) ** -0.5 * factor nn.init.normal_(module.fc1.weight, std=fc_std) nn.init.normal_(module.fc2.weight, std=in_proj_std) elif isinstance(module, ChineseCLIPModel): nn.init.normal_( module.text_projection.weight, std=module.text_embed_dim**-0.5 * self.config.initializer_factor, ) nn.init.normal_( module.visual_projection.weight, std=module.vision_embed_dim**-0.5 * self.config.initializer_factor, ) if isinstance(module, nn.LayerNorm): 
module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, ChineseCLIPVisionEncoder) or isinstance(module, ChineseCLIPTextEncoder): module.gradient_checkpointing = value CHINESE_CLIP_START_DOCSTRING = r""" This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`ChineseCLIPConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ CHINESE_CLIP_TEXT_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ CHINESE_CLIP_VISION_INPUTS_DOCSTRING = r""" Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. 
Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ChineseCLIPImageProcessor.__call__`] for details. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ CHINESE_CLIP_INPUTS_DOCSTRING = r""" Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using [`AutoImageProcessor`]. See [`ChineseCLIPImageProcessor.__call__`] for details. return_loss (`bool`, *optional*): Whether or not to return the contrastive loss. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->ChineseCLIPText class ChineseCLIPTextEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([ChineseCLIPTextLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = False, output_hidden_states: Optional[bool] = False, return_dict: Optional[bool] = True, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPastAndCrossAttentions]: all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) class ChineseCLIPVisionEncoder(nn.Module): """ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a [`ChineseCLIPVisionEncoderLayer`]. 
Args: config: ChineseCLIPConfig """ def __init__(self, config: ChineseCLIPConfig): super().__init__() self.config = config self.layers = nn.ModuleList([ChineseCLIPVisionLayer(config) for _ in range(config.num_hidden_layers)]) self.gradient_checkpointing = False def forward( self, inputs_embeds, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutput]: r""" Args: inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None hidden_states = inputs_embeds for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(encoder_layer), hidden_states, ) else: layer_outputs = encoder_layer( hidden_states, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class ChineseCLIPVisionTransformer(nn.Module): def __init__(self, config: ChineseCLIPVisionConfig): super().__init__() self.config = config embed_dim = config.hidden_size self.embeddings = ChineseCLIPVisionEmbeddings(config) self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) self.encoder = ChineseCLIPVisionEncoder(config) self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps) @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ChineseCLIPVisionConfig) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: """ output_attentions = output_attentions if output_attentions is not None else 
self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if pixel_values is None: raise ValueError("You have to specify pixel_values") hidden_states = self.embeddings(pixel_values) hidden_states = self.pre_layrnorm(hidden_states) encoder_outputs = self.encoder( inputs_embeds=hidden_states, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) last_hidden_state = encoder_outputs[0] pooled_output = last_hidden_state[:, 0, :] pooled_output = self.post_layernorm(pooled_output) if not return_dict: return (last_hidden_state, pooled_output) + encoder_outputs[1:] return BaseModelOutputWithPooling( last_hidden_state=last_hidden_state, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings( "The text model from CHINESE_CLIP without any head or projection on top.", CHINESE_CLIP_START_DOCSTRING, ) class ChineseCLIPTextModel(ChineseCLIPPreTrainedModel): """ The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in [Attention is all you need](https://arxiv.org/abs/1706.03762) by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. To behave as an decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to `True`. To be used in a Seq2Seq model, the model needs to initialized with both `is_decoder` argument and `add_cross_attention` set to `True`; an `encoder_hidden_states` is then expected as an input to the forward pass. """ config_class = ChineseCLIPTextConfig def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = ChineseCLIPTextEmbeddings(config) self.encoder = ChineseCLIPTextEncoder(config) self.pooler = ChineseCLIPTextPooler(config) if add_pooling_layer else None # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. 
heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(CHINESE_CLIP_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPoolingAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, head_mask: Optional[torch.Tensor] = None, inputs_embeds: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]: r""" encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past_key_values`). 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") batch_size, seq_length = input_shape device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) if token_type_ids is None: if hasattr(self.embeddings, "token_type_ids"): buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length] buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length) token_type_ids = buffered_token_type_ids_expanded else: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape) # If a 2D or 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size() encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask) else: encoder_extended_attention_mask = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds, past_key_values_length=past_key_values_length, ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_extended_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return 
BaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=sequence_output, pooler_output=pooled_output, past_key_values=encoder_outputs.past_key_values, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, cross_attentions=encoder_outputs.cross_attentions, ) @add_start_docstrings( """The vision model from CHINESE_CLIP without any head or projection on top.""", CHINESE_CLIP_START_DOCSTRING, ) class ChineseCLIPVisionModel(ChineseCLIPPreTrainedModel): config_class = ChineseCLIPVisionConfig main_input_name = "pixel_values" def __init__(self, config: ChineseCLIPVisionConfig): super().__init__(config) self.vision_model = ChineseCLIPVisionTransformer(config) # Initialize weights and apply final processing self.post_init() def get_input_embeddings(self) -> nn.Module: return self.vision_model.embeddings.patch_embedding @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=ChineseCLIPVisionConfig) def forward( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, BaseModelOutputWithPooling]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import CLIPProcessor, ChineseCLIPVisionModel >>> model = ChineseCLIPVisionModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> processor = CLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_state = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output # pooled CLS states ```""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict return self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) @add_start_docstrings(CHINESE_CLIP_START_DOCSTRING) class ChineseCLIPModel(ChineseCLIPPreTrainedModel): config_class = ChineseCLIPConfig def __init__(self, config: ChineseCLIPConfig): super().__init__(config) if not isinstance(config.text_config, ChineseCLIPTextConfig): raise ValueError( "config.text_config is expected to be of type ChineseCLIPTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, ChineseCLIPVisionConfig): raise ValueError( "config.vision_config is expected to be of type ChineseCLIPVisionConfig but is of type" f" {type(config.vision_config)}." 
) text_config = config.text_config vision_config = config.vision_config self.projection_dim = config.projection_dim self.text_embed_dim = text_config.hidden_size self.vision_embed_dim = vision_config.hidden_size self.text_model = ChineseCLIPTextModel(text_config, add_pooling_layer=False) self.vision_model = ChineseCLIPVisionTransformer(vision_config) self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False) self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False) self.logit_scale = nn.Parameter(torch.ones([]) * self.config.logit_scale_init_value) # Initialize weights and apply final processing self.post_init() @add_start_docstrings_to_model_forward(CHINESE_CLIP_TEXT_INPUTS_DOCSTRING) def get_text_features( self, input_ids: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: text_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The text embeddings obtained by applying the projection layer to the final [CLS] hidden state of Text-Transformer. Examples: ```python >>> from transformers import AutoTokenizer, ChineseCLIPModel >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> tokenizer = AutoTokenizer.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> inputs = tokenizer(["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], padding=True, return_tensors="pt") >>> text_features = model.get_text_features(**inputs) >>> text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True) ```""" # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = text_outputs[0][:, 0, :] text_features = self.text_projection(pooled_output) return text_features @add_start_docstrings_to_model_forward(CHINESE_CLIP_VISION_INPUTS_DOCSTRING) def get_image_features( self, pixel_values: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> torch.FloatTensor: r""" Returns: image_features (`torch.FloatTensor` of shape `(batch_size, output_dim`): The image embeddings obtained by applying the projection layer to the final [CLS] hidden state of Vision-Transformer. 
Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, ChineseCLIPModel >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(images=image, return_tensors="pt") >>> image_features = model.get_image_features(**inputs) >>> image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True) ```""" # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = vision_outputs[1] # pooled_output image_features = self.visual_projection(pooled_output) return image_features @add_start_docstrings_to_model_forward(CHINESE_CLIP_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=ChineseCLIPOutput, config_class=ChineseCLIPConfig) def forward( self, input_ids: Optional[torch.LongTensor] = None, pixel_values: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.Tensor] = None, token_type_ids: Optional[torch.Tensor] = None, position_ids: Optional[torch.LongTensor] = None, return_loss: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, ChineseCLIPOutput]: r""" Returns: Examples: ```python >>> from PIL import Image >>> import requests >>> from transformers import AutoProcessor, ChineseCLIPModel >>> model = ChineseCLIPModel.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> processor = AutoProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16") >>> url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg" >>> image = Image.open(requests.get(url, stream=True).raw) >>> inputs = processor(text=["杰尼龟", "妙蛙种子", "小火龙", "皮卡丘"], images=image, return_tensors="pt", padding=True) >>> outputs = model(**inputs) >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities ```""" # Use CHINESE_CLIP model's config for some fields (if specified) instead of those of vision & text components. 
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict vision_outputs = self.vision_model( pixel_values=pixel_values, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) text_outputs = self.text_model( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) image_embeds = vision_outputs[1] image_embeds = self.visual_projection(image_embeds) text_embeds = text_outputs[0][:, 0, :] text_embeds = self.text_projection(text_embeds) # normalized features image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True) text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True) # cosine similarity as logits logit_scale = self.logit_scale.exp() logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale logits_per_image = logits_per_text.t() loss = None if return_loss: loss = chinese_clip_loss(logits_per_text) if not return_dict: # fix the None pooled_output of text_outputs to conform with dict_output pooled_output = text_outputs[1] if pooled_output is None: text_outputs = (text_outputs[0],) + text_outputs[2:] output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs) return ((loss,) + output) if loss is not None else output return ChineseCLIPOutput( loss=loss, logits_per_image=logits_per_image, logits_per_text=logits_per_text, text_embeds=text_embeds, image_embeds=image_embeds, text_model_output=text_outputs, vision_model_output=vision_outputs, )
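A side note on the objective used in the forward pass above: `chinese_clip_loss` averages a text-to-image and an image-to-text cross-entropy against the diagonal of the similarity matrix. A minimal, self-contained sketch follows — the similarity values are made up for illustration, and the two helper functions simply mirror the definitions earlier in this file:

```python
import torch
from torch import nn


def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
    # cross-entropy against the diagonal: the i-th text is the positive match for the i-th image
    return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))


def chinese_clip_loss(similarity: torch.Tensor) -> torch.Tensor:
    # symmetric objective: average the text->image and image->text directions
    return (contrastive_loss(similarity) + contrastive_loss(similarity.t())) / 2.0


# illustrative 3x3 similarity matrix for a batch of three text/image pairs;
# large diagonal entries mean each text already scores highest against its own image
logits_per_text = torch.tensor([[5.0, 0.1, 0.2],
                                [0.3, 4.5, 0.1],
                                [0.2, 0.4, 6.0]])
print(chinese_clip_loss(logits_per_text))  # small loss because the matching pairs dominate
```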
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
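A minimal sketch of the replacement pattern described above — the tensor and variable names are illustrative rather than taken from any particular model file, and `torch.div(..., rounding_mode="floor")` is assumed here as the usual forward-compatible stand-in for floor division on tensors:

```python
import torch

positions = torch.arange(10)

# deprecated: `positions // 3` triggers the "__floordiv__ is deprecated" warning on recent PyTorch
# block_ids = positions // 3

# forward-compatible replacement
block_ids = torch.div(positions, 3, rounding_mode="floor")
print(block_ids)  # tensor([0, 0, 0, 1, 1, 1, 2, 2, 2, 3])
```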
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./examples/research_projects/seq2seq-distillation/_test_seq2seq_examples.py
import argparse import logging import os import sys import tempfile from pathlib import Path import lightning_base import pytest import pytorch_lightning as pl import torch from convert_pl_checkpoint_to_hf import convert_pl_to_hf from distillation import distill_main from finetune import SummarizationModule, main from huggingface_hub import list_models from parameterized import parameterized from run_eval import generate_summaries_or_translations from torch import nn from transformers import AutoConfig, AutoModelForSeq2SeqLM from transformers.testing_utils import CaptureStderr, CaptureStdout, TestCasePlus, require_torch_gpu, slow from utils import label_smoothed_nll_loss, lmap, load_json logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() CUDA_AVAILABLE = torch.cuda.is_available() CHEAP_ARGS = { "max_tokens_per_batch": None, "supervise_forward": True, "normalize_hidden": True, "label_smoothing": 0.2, "eval_max_gen_length": None, "eval_beams": 1, "val_metric": "loss", "save_top_k": 1, "adafactor": True, "early_stopping_patience": 2, "logger_name": "default", "length_penalty": 0.5, "cache_dir": "", "task": "summarization", "num_workers": 2, "alpha_hid": 0, "freeze_embeds": True, "enc_only": False, "tgt_suffix": "", "resume_from_checkpoint": None, "sortish_sampler": True, "student_decoder_layers": 1, "val_check_interval": 1.0, "output_dir": "", "fp16": False, # TODO(SS): set this to CUDA_AVAILABLE if ci installs apex or start using native amp "no_teacher": False, "fp16_opt_level": "O1", "gpus": 1 if CUDA_AVAILABLE else 0, "n_tpu_cores": 0, "max_grad_norm": 1.0, "do_train": True, "do_predict": True, "accumulate_grad_batches": 1, "server_ip": "", "server_port": "", "seed": 42, "model_name_or_path": "sshleifer/bart-tiny-random", "config_name": "", "tokenizer_name": "facebook/bart-large", "do_lower_case": False, "learning_rate": 0.3, "lr_scheduler": "linear", "weight_decay": 0.0, "adam_epsilon": 1e-08, "warmup_steps": 0, "max_epochs": 1, "train_batch_size": 2, "eval_batch_size": 2, "max_source_length": 12, "max_target_length": 12, "val_max_target_length": 12, "test_max_target_length": 12, "fast_dev_run": False, "no_cache": False, "n_train": -1, "n_val": -1, "n_test": -1, "student_encoder_layers": 1, "freeze_encoder": False, "auto_scale_batch_size": False, "overwrite_output_dir": False, "student": None, } def _dump_articles(path: Path, articles: list): content = "\n".join(articles) Path(path).open("w").writelines(content) ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."] SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"] T5_TINY = "patrickvonplaten/t5-tiny-random" T5_TINIER = "sshleifer/t5-tinier-random" BART_TINY = "sshleifer/bart-tiny-random" MBART_TINY = "sshleifer/tiny-mbart" MARIAN_TINY = "sshleifer/tiny-marian-en-de" FSMT_TINY = "stas/tiny-wmt19-en-de" stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks def make_test_data_dir(tmp_dir): for split in ["train", "val", "test"]: _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES) _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES) return tmp_dir class TestSummarizationDistiller(TestCasePlus): @classmethod def setUpClass(cls): logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks return cls @slow @require_torch_gpu def test_hub_configs(self): """I put require_torch_gpu cause I only want this to 
run with self-scheduled.""" model_list = list_models() org = "sshleifer" model_ids = [x.modelId for x in model_list if x.modelId.startswith(org)] allowed_to_be_broken = ["sshleifer/blenderbot-3B", "sshleifer/blenderbot-90M"] failures = [] for m in model_ids: if m in allowed_to_be_broken: continue try: AutoConfig.from_pretrained(m) except Exception: failures.append(m) assert not failures, f"The following models could not be loaded through AutoConfig: {failures}" def test_distill_no_teacher(self): updates = {"student_encoder_layers": 2, "student_decoder_layers": 1, "no_teacher": True} self._test_distiller_cli(updates) def test_distill_checkpointing_with_teacher(self): updates = { "student_encoder_layers": 2, "student_decoder_layers": 1, "max_epochs": 4, "val_check_interval": 0.25, "alpha_hid": 2.0, "model_name_or_path": "IGNORE_THIS_IT_DOESNT_GET_USED", } model = self._test_distiller_cli(updates, check_contents=False) ckpts = list(Path(model.output_dir).glob("*.ckpt")) self.assertEqual(1, len(ckpts)) transformer_ckpts = list(Path(model.output_dir).glob("**/*.bin")) self.assertEqual(len(transformer_ckpts), 2) examples = lmap(str.strip, Path(model.hparams.data_dir).joinpath("test.source").open().readlines()) out_path = tempfile.mktemp() # XXX: not being cleaned up generate_summaries_or_translations(examples, out_path, str(model.output_dir / "best_tfmr")) self.assertTrue(Path(out_path).exists()) out_path_new = self.get_auto_remove_tmp_dir() convert_pl_to_hf(ckpts[0], transformer_ckpts[0].parent, out_path_new) assert os.path.exists(os.path.join(out_path_new, "pytorch_model.bin")) def test_loss_fn(self): model = AutoModelForSeq2SeqLM.from_pretrained(BART_TINY) input_ids, mask = model.dummy_inputs["input_ids"], model.dummy_inputs["attention_mask"] target_ids = torch.tensor([[0, 4, 8, 2], [0, 8, 2, 1]], dtype=torch.long, device=model.device) decoder_input_ids = target_ids[:, :-1].contiguous() # Why this line? lm_labels = target_ids[:, 1:].clone() # why clone? 
model_computed_loss = model( input_ids, attention_mask=mask, decoder_input_ids=decoder_input_ids, labels=lm_labels, use_cache=False ).loss logits = model(input_ids, attention_mask=mask, decoder_input_ids=decoder_input_ids, use_cache=False).logits lprobs = nn.functional.log_softmax(logits, dim=-1) smoothed_loss, nll_loss = label_smoothed_nll_loss( lprobs, lm_labels, 0.1, ignore_index=model.config.pad_token_id ) with self.assertRaises(AssertionError): # TODO: understand why this breaks self.assertEqual(nll_loss, model_computed_loss) def test_distill_mbart(self): updates = { "student_encoder_layers": 2, "student_decoder_layers": 1, "num_train_epochs": 4, "val_check_interval": 0.25, "alpha_hid": 2.0, "task": "translation", "model_name_or_path": "IGNORE_THIS_IT_DOESNT_GET_USED", "tokenizer_name": MBART_TINY, "teacher": MBART_TINY, "src_lang": "en_XX", "tgt_lang": "ro_RO", } model = self._test_distiller_cli(updates, check_contents=False) assert model.model.config.model_type == "mbart" ckpts = list(Path(model.output_dir).glob("*.ckpt")) self.assertEqual(1, len(ckpts)) transformer_ckpts = list(Path(model.output_dir).glob("**/*.bin")) all_files = list(Path(model.output_dir).glob("best_tfmr/*")) assert len(all_files) > 2 self.assertEqual(len(transformer_ckpts), 2) def test_distill_t5(self): updates = { "student_encoder_layers": 1, "student_decoder_layers": 1, "alpha_hid": 2.0, "teacher": T5_TINY, "model_name_or_path": T5_TINY, "tokenizer_name": T5_TINY, } self._test_distiller_cli(updates) def test_distill_different_base_models(self): updates = { "teacher": T5_TINY, "student": T5_TINIER, "model_name_or_path": T5_TINIER, "tokenizer_name": T5_TINIER, } self._test_distiller_cli(updates) def _test_distiller_cli(self, updates, check_contents=True): default_updates = { "label_smoothing": 0.0, "early_stopping_patience": -1, "train_batch_size": 1, "eval_batch_size": 2, "max_epochs": 2, "alpha_mlm": 0.2, "alpha_ce": 0.8, "do_predict": True, "model_name_or_path": "sshleifer/tinier_bart", "teacher": CHEAP_ARGS["model_name_or_path"], "val_check_interval": 0.5, } default_updates.update(updates) args_d: dict = CHEAP_ARGS.copy() tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) output_dir = self.get_auto_remove_tmp_dir() args_d.update(data_dir=tmp_dir, output_dir=output_dir, **default_updates) model = distill_main(argparse.Namespace(**args_d)) if not check_contents: return model contents = os.listdir(output_dir) contents = {os.path.basename(p) for p in contents} ckpt_files = [p for p in contents if p.endswith("ckpt")] assert len(ckpt_files) > 0 self.assertIn("test_generations.txt", contents) self.assertIn("test_results.txt", contents) metrics = load_json(model.metrics_save_path) last_step_stats = metrics["val"][-1] self.assertGreaterEqual(last_step_stats["val_avg_gen_time"], 0.01) self.assertGreaterEqual(1.0, last_step_stats["val_avg_gen_time"]) self.assertIsInstance(last_step_stats[f"val_avg_{model.val_metric}"], float) desired_n_evals = int(args_d["max_epochs"] * (1 / args_d["val_check_interval"]) + 1) self.assertEqual(len(metrics["val"]), desired_n_evals) self.assertEqual(len(metrics["test"]), 1) return model class TestTheRest(TestCasePlus): @parameterized.expand( [T5_TINY, BART_TINY, MBART_TINY, MARIAN_TINY, FSMT_TINY], ) def test_finetune(self, model): args_d: dict = CHEAP_ARGS.copy() task = "translation" if model in [MBART_TINY, MARIAN_TINY, FSMT_TINY] else "summarization" args_d["label_smoothing"] = 0.1 if task == "translation" else 0 tmp_dir = 
make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) output_dir = self.get_auto_remove_tmp_dir() args_d.update( data_dir=tmp_dir, model_name_or_path=model, tokenizer_name=None, train_batch_size=2, eval_batch_size=2, output_dir=output_dir, do_predict=True, task=task, src_lang="en_XX", tgt_lang="ro_RO", freeze_encoder=True, freeze_embeds=True, ) assert "n_train" in args_d args = argparse.Namespace(**args_d) module = main(args) input_embeds = module.model.get_input_embeddings() assert not input_embeds.weight.requires_grad if model == T5_TINY: lm_head = module.model.lm_head assert not lm_head.weight.requires_grad assert (lm_head.weight == input_embeds.weight).all().item() elif model == FSMT_TINY: fsmt = module.model.model embed_pos = fsmt.decoder.embed_positions assert not embed_pos.weight.requires_grad assert not fsmt.decoder.embed_tokens.weight.requires_grad # check that embeds are not the same assert fsmt.decoder.embed_tokens != fsmt.encoder.embed_tokens else: bart = module.model.model embed_pos = bart.decoder.embed_positions assert not embed_pos.weight.requires_grad assert not bart.shared.weight.requires_grad # check that embeds are the same assert bart.decoder.embed_tokens == bart.encoder.embed_tokens assert bart.decoder.embed_tokens == bart.shared example_batch = load_json(module.output_dir / "text_batch.json") assert isinstance(example_batch, dict) assert len(example_batch) >= 4 def test_finetune_extra_model_args(self): args_d: dict = CHEAP_ARGS.copy() task = "summarization" tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) args_d.update( data_dir=tmp_dir, tokenizer_name=None, train_batch_size=2, eval_batch_size=2, do_predict=False, task=task, src_lang="en_XX", tgt_lang="ro_RO", freeze_encoder=True, freeze_embeds=True, ) # test models whose config includes the extra_model_args model = BART_TINY output_dir = self.get_auto_remove_tmp_dir() args_d1 = args_d.copy() args_d1.update( model_name_or_path=model, output_dir=output_dir, ) extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: args_d1[p] = 0.5 args = argparse.Namespace(**args_d1) model = main(args) for p in extra_model_params: assert getattr(model.config, p) == 0.5, f"failed to override the model config for param {p}" # test models whose config doesn't include the extra_model_args model = T5_TINY output_dir = self.get_auto_remove_tmp_dir() args_d2 = args_d.copy() args_d2.update( model_name_or_path=model, output_dir=output_dir, ) unsupported_param = "encoder_layerdrop" args_d2[unsupported_param] = 0.5 args = argparse.Namespace(**args_d2) with pytest.raises(Exception) as excinfo: model = main(args) assert str(excinfo.value) == f"model config doesn't have a `{unsupported_param}` attribute" def test_finetune_lr_schedulers(self): args_d: dict = CHEAP_ARGS.copy() task = "summarization" tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) model = BART_TINY output_dir = self.get_auto_remove_tmp_dir() args_d.update( data_dir=tmp_dir, model_name_or_path=model, output_dir=output_dir, tokenizer_name=None, train_batch_size=2, eval_batch_size=2, do_predict=False, task=task, src_lang="en_XX", tgt_lang="ro_RO", freeze_encoder=True, freeze_embeds=True, ) # emulate finetune.py parser = argparse.ArgumentParser() parser = pl.Trainer.add_argparse_args(parser) parser = SummarizationModule.add_model_specific_args(parser, os.getcwd()) args = {"--help": True} # --help test with pytest.raises(SystemExit) as excinfo: with CaptureStdout() as cs: args 
= parser.parse_args(args) assert False, "--help is expected to sys.exit" assert excinfo.type == SystemExit expected = lightning_base.arg_to_scheduler_metavar assert expected in cs.out, "--help is expected to list the supported schedulers" # --lr_scheduler=non_existing_scheduler test unsupported_param = "non_existing_scheduler" args = {f"--lr_scheduler={unsupported_param}"} with pytest.raises(SystemExit) as excinfo: with CaptureStderr() as cs: args = parser.parse_args(args) assert False, "invalid argument is expected to sys.exit" assert excinfo.type == SystemExit expected = f"invalid choice: '{unsupported_param}'" assert expected in cs.err, f"should have bailed on invalid choice of scheduler {unsupported_param}" # --lr_scheduler=existing_scheduler test supported_param = "cosine" args_d1 = args_d.copy() args_d1["lr_scheduler"] = supported_param args = argparse.Namespace(**args_d1) model = main(args) assert ( getattr(model.hparams, "lr_scheduler") == supported_param ), f"lr_scheduler={supported_param} shouldn't fail"
import argparse import logging import os import sys import tempfile from pathlib import Path import lightning_base import pytest import pytorch_lightning as pl import torch from convert_pl_checkpoint_to_hf import convert_pl_to_hf from distillation import distill_main from finetune import SummarizationModule, main from huggingface_hub import list_models from parameterized import parameterized from run_eval import generate_summaries_or_translations from torch import nn from transformers import AutoConfig, AutoModelForSeq2SeqLM from transformers.testing_utils import CaptureStderr, CaptureStdout, TestCasePlus, require_torch_gpu, slow from utils import label_smoothed_nll_loss, lmap, load_json logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger() CUDA_AVAILABLE = torch.cuda.is_available() CHEAP_ARGS = { "max_tokens_per_batch": None, "supervise_forward": True, "normalize_hidden": True, "label_smoothing": 0.2, "eval_max_gen_length": None, "eval_beams": 1, "val_metric": "loss", "save_top_k": 1, "adafactor": True, "early_stopping_patience": 2, "logger_name": "default", "length_penalty": 0.5, "cache_dir": "", "task": "summarization", "num_workers": 2, "alpha_hid": 0, "freeze_embeds": True, "enc_only": False, "tgt_suffix": "", "resume_from_checkpoint": None, "sortish_sampler": True, "student_decoder_layers": 1, "val_check_interval": 1.0, "output_dir": "", "fp16": False, # TODO(SS): set this to CUDA_AVAILABLE if ci installs apex or start using native amp "no_teacher": False, "fp16_opt_level": "O1", "gpus": 1 if CUDA_AVAILABLE else 0, "n_tpu_cores": 0, "max_grad_norm": 1.0, "do_train": True, "do_predict": True, "accumulate_grad_batches": 1, "server_ip": "", "server_port": "", "seed": 42, "model_name_or_path": "sshleifer/bart-tiny-random", "config_name": "", "tokenizer_name": "facebook/bart-large", "do_lower_case": False, "learning_rate": 0.3, "lr_scheduler": "linear", "weight_decay": 0.0, "adam_epsilon": 1e-08, "warmup_steps": 0, "max_epochs": 1, "train_batch_size": 2, "eval_batch_size": 2, "max_source_length": 12, "max_target_length": 12, "val_max_target_length": 12, "test_max_target_length": 12, "fast_dev_run": False, "no_cache": False, "n_train": -1, "n_val": -1, "n_test": -1, "student_encoder_layers": 1, "freeze_encoder": False, "auto_scale_batch_size": False, "overwrite_output_dir": False, "student": None, } def _dump_articles(path: Path, articles: list): content = "\n".join(articles) Path(path).open("w").writelines(content) ARTICLES = [" Sam ate lunch today.", "Sams lunch ingredients."] SUMMARIES = ["A very interesting story about what I ate for lunch.", "Avocado, celery, turkey, coffee"] T5_TINY = "patrickvonplaten/t5-tiny-random" T5_TINIER = "sshleifer/t5-tinier-random" BART_TINY = "sshleifer/bart-tiny-random" MBART_TINY = "sshleifer/tiny-mbart" MARIAN_TINY = "sshleifer/tiny-marian-en-de" FSMT_TINY = "stas/tiny-wmt19-en-de" stream_handler = logging.StreamHandler(sys.stdout) logger.addHandler(stream_handler) logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks def make_test_data_dir(tmp_dir): for split in ["train", "val", "test"]: _dump_articles(os.path.join(tmp_dir, f"{split}.source"), ARTICLES) _dump_articles(os.path.join(tmp_dir, f"{split}.target"), SUMMARIES) return tmp_dir class TestSummarizationDistiller(TestCasePlus): @classmethod def setUpClass(cls): logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks return cls @slow @require_torch_gpu def test_hub_configs(self): """I put require_torch_gpu cause I only want this to 
run with self-scheduled.""" model_list = list_models() org = "sshleifer" model_ids = [x.modelId for x in model_list if x.modelId.startswith(org)] allowed_to_be_broken = ["sshleifer/blenderbot-3B", "sshleifer/blenderbot-90M"] failures = [] for m in model_ids: if m in allowed_to_be_broken: continue try: AutoConfig.from_pretrained(m) except Exception: failures.append(m) assert not failures, f"The following models could not be loaded through AutoConfig: {failures}" def test_distill_no_teacher(self): updates = {"student_encoder_layers": 2, "student_decoder_layers": 1, "no_teacher": True} self._test_distiller_cli(updates) def test_distill_checkpointing_with_teacher(self): updates = { "student_encoder_layers": 2, "student_decoder_layers": 1, "max_epochs": 4, "val_check_interval": 0.25, "alpha_hid": 2.0, "model_name_or_path": "IGNORE_THIS_IT_DOESNT_GET_USED", } model = self._test_distiller_cli(updates, check_contents=False) ckpts = list(Path(model.output_dir).glob("*.ckpt")) self.assertEqual(1, len(ckpts)) transformer_ckpts = list(Path(model.output_dir).glob("**/*.bin")) self.assertEqual(len(transformer_ckpts), 2) examples = lmap(str.strip, Path(model.hparams.data_dir).joinpath("test.source").open().readlines()) out_path = tempfile.mktemp() # XXX: not being cleaned up generate_summaries_or_translations(examples, out_path, str(model.output_dir / "best_tfmr")) self.assertTrue(Path(out_path).exists()) out_path_new = self.get_auto_remove_tmp_dir() convert_pl_to_hf(ckpts[0], transformer_ckpts[0].parent, out_path_new) assert os.path.exists(os.path.join(out_path_new, "pytorch_model.bin")) def test_loss_fn(self): model = AutoModelForSeq2SeqLM.from_pretrained(BART_TINY) input_ids, mask = model.dummy_inputs["input_ids"], model.dummy_inputs["attention_mask"] target_ids = torch.tensor([[0, 4, 8, 2], [0, 8, 2, 1]], dtype=torch.long, device=model.device) decoder_input_ids = target_ids[:, :-1].contiguous() # Why this line? lm_labels = target_ids[:, 1:].clone() # why clone? 
model_computed_loss = model( input_ids, attention_mask=mask, decoder_input_ids=decoder_input_ids, labels=lm_labels, use_cache=False ).loss logits = model(input_ids, attention_mask=mask, decoder_input_ids=decoder_input_ids, use_cache=False).logits lprobs = nn.functional.log_softmax(logits, dim=-1) smoothed_loss, nll_loss = label_smoothed_nll_loss( lprobs, lm_labels, 0.1, ignore_index=model.config.pad_token_id ) with self.assertRaises(AssertionError): # TODO: understand why this breaks self.assertEqual(nll_loss, model_computed_loss) def test_distill_mbart(self): updates = { "student_encoder_layers": 2, "student_decoder_layers": 1, "num_train_epochs": 4, "val_check_interval": 0.25, "alpha_hid": 2.0, "task": "translation", "model_name_or_path": "IGNORE_THIS_IT_DOESNT_GET_USED", "tokenizer_name": MBART_TINY, "teacher": MBART_TINY, "src_lang": "en_XX", "tgt_lang": "ro_RO", } model = self._test_distiller_cli(updates, check_contents=False) assert model.model.config.model_type == "mbart" ckpts = list(Path(model.output_dir).glob("*.ckpt")) self.assertEqual(1, len(ckpts)) transformer_ckpts = list(Path(model.output_dir).glob("**/*.bin")) all_files = list(Path(model.output_dir).glob("best_tfmr/*")) assert len(all_files) > 2 self.assertEqual(len(transformer_ckpts), 2) def test_distill_t5(self): updates = { "student_encoder_layers": 1, "student_decoder_layers": 1, "alpha_hid": 2.0, "teacher": T5_TINY, "model_name_or_path": T5_TINY, "tokenizer_name": T5_TINY, } self._test_distiller_cli(updates) def test_distill_different_base_models(self): updates = { "teacher": T5_TINY, "student": T5_TINIER, "model_name_or_path": T5_TINIER, "tokenizer_name": T5_TINIER, } self._test_distiller_cli(updates) def _test_distiller_cli(self, updates, check_contents=True): default_updates = { "label_smoothing": 0.0, "early_stopping_patience": -1, "train_batch_size": 1, "eval_batch_size": 2, "max_epochs": 2, "alpha_mlm": 0.2, "alpha_ce": 0.8, "do_predict": True, "model_name_or_path": "sshleifer/tinier_bart", "teacher": CHEAP_ARGS["model_name_or_path"], "val_check_interval": 0.5, } default_updates.update(updates) args_d: dict = CHEAP_ARGS.copy() tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) output_dir = self.get_auto_remove_tmp_dir() args_d.update(data_dir=tmp_dir, output_dir=output_dir, **default_updates) model = distill_main(argparse.Namespace(**args_d)) if not check_contents: return model contents = os.listdir(output_dir) contents = {os.path.basename(p) for p in contents} ckpt_files = [p for p in contents if p.endswith("ckpt")] assert len(ckpt_files) > 0 self.assertIn("test_generations.txt", contents) self.assertIn("test_results.txt", contents) metrics = load_json(model.metrics_save_path) last_step_stats = metrics["val"][-1] self.assertGreaterEqual(last_step_stats["val_avg_gen_time"], 0.01) self.assertGreaterEqual(1.0, last_step_stats["val_avg_gen_time"]) self.assertIsInstance(last_step_stats[f"val_avg_{model.val_metric}"], float) desired_n_evals = int(args_d["max_epochs"] * (1 / args_d["val_check_interval"]) + 1) self.assertEqual(len(metrics["val"]), desired_n_evals) self.assertEqual(len(metrics["test"]), 1) return model class TestTheRest(TestCasePlus): @parameterized.expand( [T5_TINY, BART_TINY, MBART_TINY, MARIAN_TINY, FSMT_TINY], ) def test_finetune(self, model): args_d: dict = CHEAP_ARGS.copy() task = "translation" if model in [MBART_TINY, MARIAN_TINY, FSMT_TINY] else "summarization" args_d["label_smoothing"] = 0.1 if task == "translation" else 0 tmp_dir = 
make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) output_dir = self.get_auto_remove_tmp_dir() args_d.update( data_dir=tmp_dir, model_name_or_path=model, tokenizer_name=None, train_batch_size=2, eval_batch_size=2, output_dir=output_dir, do_predict=True, task=task, src_lang="en_XX", tgt_lang="ro_RO", freeze_encoder=True, freeze_embeds=True, ) assert "n_train" in args_d args = argparse.Namespace(**args_d) module = main(args) input_embeds = module.model.get_input_embeddings() assert not input_embeds.weight.requires_grad if model == T5_TINY: lm_head = module.model.lm_head assert not lm_head.weight.requires_grad assert (lm_head.weight == input_embeds.weight).all().item() elif model == FSMT_TINY: fsmt = module.model.model embed_pos = fsmt.decoder.embed_positions assert not embed_pos.weight.requires_grad assert not fsmt.decoder.embed_tokens.weight.requires_grad # check that embeds are not the same assert fsmt.decoder.embed_tokens != fsmt.encoder.embed_tokens else: bart = module.model.model embed_pos = bart.decoder.embed_positions assert not embed_pos.weight.requires_grad assert not bart.shared.weight.requires_grad # check that embeds are the same assert bart.decoder.embed_tokens == bart.encoder.embed_tokens assert bart.decoder.embed_tokens == bart.shared example_batch = load_json(module.output_dir / "text_batch.json") assert isinstance(example_batch, dict) assert len(example_batch) >= 4 def test_finetune_extra_model_args(self): args_d: dict = CHEAP_ARGS.copy() task = "summarization" tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) args_d.update( data_dir=tmp_dir, tokenizer_name=None, train_batch_size=2, eval_batch_size=2, do_predict=False, task=task, src_lang="en_XX", tgt_lang="ro_RO", freeze_encoder=True, freeze_embeds=True, ) # test models whose config includes the extra_model_args model = BART_TINY output_dir = self.get_auto_remove_tmp_dir() args_d1 = args_d.copy() args_d1.update( model_name_or_path=model, output_dir=output_dir, ) extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: args_d1[p] = 0.5 args = argparse.Namespace(**args_d1) model = main(args) for p in extra_model_params: assert getattr(model.config, p) == 0.5, f"failed to override the model config for param {p}" # test models whose config doesn't include the extra_model_args model = T5_TINY output_dir = self.get_auto_remove_tmp_dir() args_d2 = args_d.copy() args_d2.update( model_name_or_path=model, output_dir=output_dir, ) unsupported_param = "encoder_layerdrop" args_d2[unsupported_param] = 0.5 args = argparse.Namespace(**args_d2) with pytest.raises(Exception) as excinfo: model = main(args) assert str(excinfo.value) == f"model config doesn't have a `{unsupported_param}` attribute" def test_finetune_lr_schedulers(self): args_d: dict = CHEAP_ARGS.copy() task = "summarization" tmp_dir = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) model = BART_TINY output_dir = self.get_auto_remove_tmp_dir() args_d.update( data_dir=tmp_dir, model_name_or_path=model, output_dir=output_dir, tokenizer_name=None, train_batch_size=2, eval_batch_size=2, do_predict=False, task=task, src_lang="en_XX", tgt_lang="ro_RO", freeze_encoder=True, freeze_embeds=True, ) # emulate finetune.py parser = argparse.ArgumentParser() parser = pl.Trainer.add_argparse_args(parser) parser = SummarizationModule.add_model_specific_args(parser, os.getcwd()) args = {"--help": True} # --help test with pytest.raises(SystemExit) as excinfo: with CaptureStdout() as cs: args 
= parser.parse_args(args) assert False, "--help is expected to sys.exit" assert excinfo.type == SystemExit expected = lightning_base.arg_to_scheduler_metavar assert expected in cs.out, "--help is expected to list the supported schedulers" # --lr_scheduler=non_existing_scheduler test unsupported_param = "non_existing_scheduler" args = {f"--lr_scheduler={unsupported_param}"} with pytest.raises(SystemExit) as excinfo: with CaptureStderr() as cs: args = parser.parse_args(args) assert False, "invalid argument is expected to sys.exit" assert excinfo.type == SystemExit expected = f"invalid choice: '{unsupported_param}'" assert expected in cs.err, f"should have bailed on invalid choice of scheduler {unsupported_param}" # --lr_scheduler=existing_scheduler test supported_param = "cosine" args_d1 = args_d.copy() args_d1["lr_scheduler"] = supported_param args = argparse.Namespace(**args_d1) model = main(args) assert ( getattr(model.hparams, "lr_scheduler") == supported_param ), f"lr_scheduler={supported_param} shouldn't fail"
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors with `//` is deprecated and has to be done via `torch.div` instead.
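To illustrate the migration the PR description refers to, here is a minimal sketch (not code taken from the PR itself); the tensor names are hypothetical and the `rounding_mode="floor"` argument is an assumption about the intended floor-division equivalent.

```python
import torch

# Hypothetical tensors: map token positions to fixed-size blocks.
token_positions = torch.arange(12)
block_size = 4

# Old pattern, which emits the __floordiv__ deprecation warning on tensors:
# block_ids = token_positions // block_size

# Replacement via torch.div; rounding_mode="floor" is assumed here to
# reproduce the old integer floor-division behavior without the warning.
block_ids = torch.div(token_positions, block_size, rounding_mode="floor")
print(block_ids)  # tensor([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
```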
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors with `//` is deprecated and has to be done via `torch.div` instead.
./src/transformers/commands/lfs.py
""" Implementation of a custom transfer agent for the transfer type "multipart" for git-lfs. Inspired by: github.com/cbartz/git-lfs-swift-transfer-agent/blob/master/git_lfs_swift_transfer.py Spec is: github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md To launch debugger while developing: ``` [lfs "customtransfer.multipart"] path = /path/to/transformers/.env/bin/python args = -m debugpy --listen 5678 --wait-for-client /path/to/transformers/src/transformers/commands/transformers_cli.py lfs-multipart-upload ```""" import json import os import subprocess import sys import warnings from argparse import ArgumentParser from contextlib import AbstractContextManager from typing import Dict, List, Optional import requests from ..utils import logging from . import BaseTransformersCLICommand logger = logging.get_logger(__name__) # pylint: disable=invalid-name LFS_MULTIPART_UPLOAD_COMMAND = "lfs-multipart-upload" class LfsCommands(BaseTransformersCLICommand): """ Implementation of a custom transfer agent for the transfer type "multipart" for git-lfs. This lets users upload large files >5GB 🔥. Spec for LFS custom transfer agent is: https://github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md This introduces two commands to the CLI: 1. $ transformers-cli lfs-enable-largefiles This should be executed once for each model repo that contains a model file >5GB. It's documented in the error message you get if you just try to git push a 5GB file without having enabled it before. 2. $ transformers-cli lfs-multipart-upload This command is called by lfs directly and is not meant to be called by the user. """ @staticmethod def register_subcommand(parser: ArgumentParser): enable_parser = parser.add_parser( "lfs-enable-largefiles", help=( "Deprecated: use `huggingface-cli` instead. Configure your repository to enable upload of files > 5GB." ), ) enable_parser.add_argument("path", type=str, help="Local path to repository you want to configure.") enable_parser.set_defaults(func=lambda args: LfsEnableCommand(args)) upload_parser = parser.add_parser( LFS_MULTIPART_UPLOAD_COMMAND, help=( "Deprecated: use `huggingface-cli` instead. " "Command will get called by git-lfs, do not call it directly." ), ) upload_parser.set_defaults(func=lambda args: LfsUploadCommand(args)) class LfsEnableCommand: def __init__(self, args): self.args = args def run(self): warnings.warn( "Managing repositories through transformers-cli is deprecated. Please use `huggingface-cli` instead." 
) local_path = os.path.abspath(self.args.path) if not os.path.isdir(local_path): print("This does not look like a valid git repo.") exit(1) subprocess.run( "git config lfs.customtransfer.multipart.path transformers-cli".split(), check=True, cwd=local_path ) subprocess.run( f"git config lfs.customtransfer.multipart.args {LFS_MULTIPART_UPLOAD_COMMAND}".split(), check=True, cwd=local_path, ) print("Local repo set up for largefiles") def write_msg(msg: Dict): """Write out the message in Line delimited JSON.""" msg = json.dumps(msg) + "\n" sys.stdout.write(msg) sys.stdout.flush() def read_msg() -> Optional[Dict]: """Read Line delimited JSON from stdin.""" msg = json.loads(sys.stdin.readline().strip()) if "terminate" in (msg.get("type"), msg.get("event")): # terminate message received return None if msg.get("event") not in ("download", "upload"): logger.critical("Received unexpected message") sys.exit(1) return msg class FileSlice(AbstractContextManager): """ File-like object that only reads a slice of a file Inspired by stackoverflow.com/a/29838711/593036 """ def __init__(self, filepath: str, seek_from: int, read_limit: int): self.filepath = filepath self.seek_from = seek_from self.read_limit = read_limit self.n_seen = 0 def __enter__(self): self.f = open(self.filepath, "rb") self.f.seek(self.seek_from) return self def __len__(self): total_length = os.fstat(self.f.fileno()).st_size return min(self.read_limit, total_length - self.seek_from) def read(self, n=-1): if self.n_seen >= self.read_limit: return b"" remaining_amount = self.read_limit - self.n_seen data = self.f.read(remaining_amount if n < 0 else min(n, remaining_amount)) self.n_seen += len(data) return data def __iter__(self): yield self.read(n=4 * 1024 * 1024) def __exit__(self, *args): self.f.close() class LfsUploadCommand: def __init__(self, args): self.args = args def run(self): # Immediately after invoking a custom transfer process, git-lfs # sends initiation data to the process over stdin. # This tells the process useful information about the configuration. init_msg = json.loads(sys.stdin.readline().strip()) if not (init_msg.get("event") == "init" and init_msg.get("operation") == "upload"): write_msg({"error": {"code": 32, "message": "Wrong lfs init operation"}}) sys.exit(1) # The transfer process should use the information it needs from the # initiation structure, and also perform any one-off setup tasks it # needs to do. It should then respond on stdout with a simple empty # confirmation structure, as follows: write_msg({}) # After the initiation exchange, git-lfs will send any number of # transfer requests to the stdin of the transfer process, in a serial sequence. while True: msg = read_msg() if msg is None: # When all transfers have been processed, git-lfs will send # a terminate event to the stdin of the transfer process. # On receiving this message the transfer process should # clean up and terminate. No response is expected. 
sys.exit(0) oid = msg["oid"] filepath = msg["path"] completion_url = msg["action"]["href"] header = msg["action"]["header"] chunk_size = int(header.pop("chunk_size")) presigned_urls: List[str] = list(header.values()) parts = [] for i, presigned_url in enumerate(presigned_urls): with FileSlice(filepath, seek_from=i * chunk_size, read_limit=chunk_size) as data: r = requests.put(presigned_url, data=data) r.raise_for_status() parts.append( { "etag": r.headers.get("etag"), "partNumber": i + 1, } ) # In order to support progress reporting while data is uploading / downloading, # the transfer process should post messages to stdout write_msg( { "event": "progress", "oid": oid, "bytesSoFar": (i + 1) * chunk_size, "bytesSinceLast": chunk_size, } ) # Not precise but that's ok. r = requests.post( completion_url, json={ "oid": oid, "parts": parts, }, ) r.raise_for_status() write_msg({"event": "complete", "oid": oid})
""" Implementation of a custom transfer agent for the transfer type "multipart" for git-lfs. Inspired by: github.com/cbartz/git-lfs-swift-transfer-agent/blob/master/git_lfs_swift_transfer.py Spec is: github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md To launch debugger while developing: ``` [lfs "customtransfer.multipart"] path = /path/to/transformers/.env/bin/python args = -m debugpy --listen 5678 --wait-for-client /path/to/transformers/src/transformers/commands/transformers_cli.py lfs-multipart-upload ```""" import json import os import subprocess import sys import warnings from argparse import ArgumentParser from contextlib import AbstractContextManager from typing import Dict, List, Optional import requests from ..utils import logging from . import BaseTransformersCLICommand logger = logging.get_logger(__name__) # pylint: disable=invalid-name LFS_MULTIPART_UPLOAD_COMMAND = "lfs-multipart-upload" class LfsCommands(BaseTransformersCLICommand): """ Implementation of a custom transfer agent for the transfer type "multipart" for git-lfs. This lets users upload large files >5GB 🔥. Spec for LFS custom transfer agent is: https://github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md This introduces two commands to the CLI: 1. $ transformers-cli lfs-enable-largefiles This should be executed once for each model repo that contains a model file >5GB. It's documented in the error message you get if you just try to git push a 5GB file without having enabled it before. 2. $ transformers-cli lfs-multipart-upload This command is called by lfs directly and is not meant to be called by the user. """ @staticmethod def register_subcommand(parser: ArgumentParser): enable_parser = parser.add_parser( "lfs-enable-largefiles", help=( "Deprecated: use `huggingface-cli` instead. Configure your repository to enable upload of files > 5GB." ), ) enable_parser.add_argument("path", type=str, help="Local path to repository you want to configure.") enable_parser.set_defaults(func=lambda args: LfsEnableCommand(args)) upload_parser = parser.add_parser( LFS_MULTIPART_UPLOAD_COMMAND, help=( "Deprecated: use `huggingface-cli` instead. " "Command will get called by git-lfs, do not call it directly." ), ) upload_parser.set_defaults(func=lambda args: LfsUploadCommand(args)) class LfsEnableCommand: def __init__(self, args): self.args = args def run(self): warnings.warn( "Managing repositories through transformers-cli is deprecated. Please use `huggingface-cli` instead." 
) local_path = os.path.abspath(self.args.path) if not os.path.isdir(local_path): print("This does not look like a valid git repo.") exit(1) subprocess.run( "git config lfs.customtransfer.multipart.path transformers-cli".split(), check=True, cwd=local_path ) subprocess.run( f"git config lfs.customtransfer.multipart.args {LFS_MULTIPART_UPLOAD_COMMAND}".split(), check=True, cwd=local_path, ) print("Local repo set up for largefiles") def write_msg(msg: Dict): """Write out the message in Line delimited JSON.""" msg = json.dumps(msg) + "\n" sys.stdout.write(msg) sys.stdout.flush() def read_msg() -> Optional[Dict]: """Read Line delimited JSON from stdin.""" msg = json.loads(sys.stdin.readline().strip()) if "terminate" in (msg.get("type"), msg.get("event")): # terminate message received return None if msg.get("event") not in ("download", "upload"): logger.critical("Received unexpected message") sys.exit(1) return msg class FileSlice(AbstractContextManager): """ File-like object that only reads a slice of a file Inspired by stackoverflow.com/a/29838711/593036 """ def __init__(self, filepath: str, seek_from: int, read_limit: int): self.filepath = filepath self.seek_from = seek_from self.read_limit = read_limit self.n_seen = 0 def __enter__(self): self.f = open(self.filepath, "rb") self.f.seek(self.seek_from) return self def __len__(self): total_length = os.fstat(self.f.fileno()).st_size return min(self.read_limit, total_length - self.seek_from) def read(self, n=-1): if self.n_seen >= self.read_limit: return b"" remaining_amount = self.read_limit - self.n_seen data = self.f.read(remaining_amount if n < 0 else min(n, remaining_amount)) self.n_seen += len(data) return data def __iter__(self): yield self.read(n=4 * 1024 * 1024) def __exit__(self, *args): self.f.close() class LfsUploadCommand: def __init__(self, args): self.args = args def run(self): # Immediately after invoking a custom transfer process, git-lfs # sends initiation data to the process over stdin. # This tells the process useful information about the configuration. init_msg = json.loads(sys.stdin.readline().strip()) if not (init_msg.get("event") == "init" and init_msg.get("operation") == "upload"): write_msg({"error": {"code": 32, "message": "Wrong lfs init operation"}}) sys.exit(1) # The transfer process should use the information it needs from the # initiation structure, and also perform any one-off setup tasks it # needs to do. It should then respond on stdout with a simple empty # confirmation structure, as follows: write_msg({}) # After the initiation exchange, git-lfs will send any number of # transfer requests to the stdin of the transfer process, in a serial sequence. while True: msg = read_msg() if msg is None: # When all transfers have been processed, git-lfs will send # a terminate event to the stdin of the transfer process. # On receiving this message the transfer process should # clean up and terminate. No response is expected. 
sys.exit(0) oid = msg["oid"] filepath = msg["path"] completion_url = msg["action"]["href"] header = msg["action"]["header"] chunk_size = int(header.pop("chunk_size")) presigned_urls: List[str] = list(header.values()) parts = [] for i, presigned_url in enumerate(presigned_urls): with FileSlice(filepath, seek_from=i * chunk_size, read_limit=chunk_size) as data: r = requests.put(presigned_url, data=data) r.raise_for_status() parts.append( { "etag": r.headers.get("etag"), "partNumber": i + 1, } ) # In order to support progress reporting while data is uploading / downloading, # the transfer process should post messages to stdout write_msg( { "event": "progress", "oid": oid, "bytesSoFar": (i + 1) * chunk_size, "bytesSinceLast": chunk_size, } ) # Not precise but that's ok. r = requests.post( completion_url, json={ "oid": oid, "parts": parts, }, ) r.raise_for_status() write_msg({"event": "complete", "oid": oid})
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors with `//` is deprecated and has to be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors with `//` is deprecated and has to be done via `torch.div` instead.
./tests/models/openai/test_modeling_openai.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class OpenAIGPTModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.pad_token_id = self.vocab_size - 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = OpenAIGPTConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, # intermediate_size=self.intermediate_size, # hidden_act=self.hidden_act, # hidden_dropout_prob=self.hidden_dropout_prob, # attention_probs_dropout_prob=self.attention_probs_dropout_prob, n_positions=self.max_position_embeddings, # type_vocab_size=self.type_vocab_size, # initializer_range=self.initializer_range pad_token_id=self.pad_token_id, ) 
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args): model = OpenAIGPTModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args): model = OpenAIGPTLMHeadModel(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args): model = OpenAIGPTDoubleHeadsModel(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_openai_gpt_for_sequence_classification( self, config, input_ids, head_mask, token_type_ids, *args ): config.num_labels = self.num_labels model = OpenAIGPTForSequenceClassification(config) model.to(torch_device) model.eval() sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) all_generative_model_classes = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly pipeline_model_mapping = ( { "feature-extraction": OpenAIGPTModel, "text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) # special case for DoubleHeads model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, ) inputs_dict["input_ids"] = inputs_dict["labels"] inputs_dict["token_type_ids"] = inputs_dict["labels"] inputs_dict["mc_token_ids"] = torch.zeros( 
(self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, ) inputs_dict["mc_labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = OpenAIGPTModelTester(self) self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_openai_gpt_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs) def test_openai_gpt_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) def test_openai_gpt_double_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs) def test_openai_gpt_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = OpenAIGPTModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_openai_gpt(self): model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt") model.to(torch_device) input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device) # the president is expected_output_ids = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the output_ids = model.generate(input_ids, do_sample=False) self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTConfig, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, ) class OpenAIGPTModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope self.pad_token_id = self.vocab_size - 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = OpenAIGPTConfig( vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, # intermediate_size=self.intermediate_size, # hidden_act=self.hidden_act, # hidden_dropout_prob=self.hidden_dropout_prob, # attention_probs_dropout_prob=self.attention_probs_dropout_prob, n_positions=self.max_position_embeddings, # type_vocab_size=self.type_vocab_size, # initializer_range=self.initializer_range pad_token_id=self.pad_token_id, ) 
head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2) return ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) def create_and_check_openai_gpt_model(self, config, input_ids, head_mask, token_type_ids, *args): model = OpenAIGPTModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, head_mask=head_mask) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args): model = OpenAIGPTLMHeadModel(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, token_type_ids, *args): model = OpenAIGPTDoubleHeadsModel(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=input_ids) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_openai_gpt_for_sequence_classification( self, config, input_ids, head_mask, token_type_ids, *args ): config.num_labels = self.num_labels model = OpenAIGPTForSequenceClassification(config) model.to(torch_device) model.eval() sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) result = model(input_ids, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, head_mask, token_type_ids, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "token_type_ids": token_type_ids, "head_mask": head_mask, } return config, inputs_dict @require_torch class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification) if is_torch_available() else () ) all_generative_model_classes = ( (OpenAIGPTLMHeadModel,) if is_torch_available() else () ) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly pipeline_model_mapping = ( { "feature-extraction": OpenAIGPTModel, "text-classification": OpenAIGPTForSequenceClassification, "text-generation": OpenAIGPTLMHeadModel, "zero-shot": OpenAIGPTForSequenceClassification, } if is_torch_available() else {} ) # special case for DoubleHeads model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "OpenAIGPTDoubleHeadsModel": inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length), dtype=torch.long, device=torch_device, ) inputs_dict["input_ids"] = inputs_dict["labels"] inputs_dict["token_type_ids"] = inputs_dict["labels"] inputs_dict["mc_token_ids"] = torch.zeros( 
(self.model_tester.batch_size, self.model_tester.num_choices), dtype=torch.long, device=torch_device, ) inputs_dict["mc_labels"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = OpenAIGPTModelTester(self) self.config_tester = ConfigTester(self, config_class=OpenAIGPTConfig, n_embd=37) def test_config(self): self.config_tester.run_common_tests() def test_openai_gpt_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs) def test_openai_gpt_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head_model(*config_and_inputs) def test_openai_gpt_double_lm_head_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs) def test_openai_gpt_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = OpenAIGPTModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_openai_gpt(self): model = OpenAIGPTLMHeadModel.from_pretrained("openai-gpt") model.to(torch_device) input_ids = torch.tensor([[481, 4735, 544]], dtype=torch.long, device=torch_device) # the president is expected_output_ids = [ 481, 4735, 544, 246, 963, 870, 762, 239, 244, 40477, 244, 249, 719, 881, 487, 544, 240, 244, 603, 481, ] # the president is a very good man. " \n " i\'m sure he is, " said the output_ids = model.generate(input_ids, do_sample=False) self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors with `//` is deprecated and has to be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors with `//` is deprecated and has to be done via `torch.div` instead.
./src/transformers/utils/dummy_keras_nlp_objects.py
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class TFGPT2Tokenizer(metaclass=DummyObject): _backends = ["keras_nlp"] def __init__(self, *args, **kwargs): requires_backends(self, ["keras_nlp"])
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class TFGPT2Tokenizer(metaclass=DummyObject): _backends = ["keras_nlp"] def __init__(self, *args, **kwargs): requires_backends(self, ["keras_nlp"])
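For context on the record above, the following is a simplified, self-contained sketch of the backend-guard idea behind such autogenerated dummy objects: the placeholder class lets imports succeed, but any attempt to use it without the backend fails with a clear error. It is a stand-in written for illustration, not the actual `DummyObject`/`requires_backends` implementation from the library.

```python
# Illustrative stand-in for the dummy-object pattern above.
# Not the real transformers implementation.
import importlib.util


def requires_backends(obj, backends):
    """Raise an informative ImportError when a required backend is missing."""
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the following backend(s): {', '.join(missing)}")


class TFGPT2Tokenizer:
    """Placeholder that only works when `keras_nlp` is installed."""

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
```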
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors with `//` is deprecated and should now be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/deberta_v2/tokenization_deberta_v2_fast.py
# coding=utf-8 # Copyright 2020 Microsoft and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Tokenization class for model DeBERTa.""" import os from shutil import copyfile from typing import Optional, Tuple from ...file_utils import is_sentencepiece_available from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if is_sentencepiece_available(): from .tokenization_deberta_v2 import DebertaV2Tokenizer else: DebertaV2Tokenizer = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "spm.model", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/spm.model", "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/spm.model", "microsoft/deberta-v2-xlarge-mnli": ( "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/spm.model" ), "microsoft/deberta-v2-xxlarge-mnli": ( "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/spm.model" ), } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "microsoft/deberta-v2-xlarge": 512, "microsoft/deberta-v2-xxlarge": 512, "microsoft/deberta-v2-xlarge-mnli": 512, "microsoft/deberta-v2-xxlarge-mnli": 512, } PRETRAINED_INIT_CONFIGURATION = { "microsoft/deberta-v2-xlarge": {"do_lower_case": False}, "microsoft/deberta-v2-xxlarge": {"do_lower_case": False}, "microsoft/deberta-v2-xlarge-mnli": {"do_lower_case": False}, "microsoft/deberta-v2-xxlarge-mnli": {"do_lower_case": False}, } class DebertaV2TokenizerFast(PreTrainedTokenizerFast): r""" Constructs a DeBERTa-v2 fast tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. do_lower_case (`bool`, *optional*, defaults to `False`): Whether or not to lowercase the input when tokenizing. bos_token (`string`, *optional*, defaults to `"[CLS]"`): The beginning of sequence token that was used during pre-training. Can be used a sequence classifier token. When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. eos_token (`string`, *optional*, defaults to `"[SEP]"`): The end of sequence token. When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. 
two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES slow_tokenizer_class = DebertaV2Tokenizer def __init__( self, vocab_file=None, tokenizer_file=None, do_lower_case=False, split_by_punct=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", **kwargs, ) -> None: super().__init__( vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, split_by_punct=split_by_punct, **kwargs, ) self.do_lower_case = do_lower_case self.split_by_punct = split_by_punct self.vocab_file = vocab_file self.can_save_slow_tokenizer = False if not self.vocab_file else True def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A DeBERTa sequence has the following format: - single sequence: [CLS] X [SEP] - pair of sequences: [CLS] A [SEP] B [SEP] Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False): """ Retrieves sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None): """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
# coding=utf-8 # Copyright 2020 Microsoft and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Fast Tokenization class for model DeBERTa.""" import os from shutil import copyfile from typing import Optional, Tuple from ...file_utils import is_sentencepiece_available from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if is_sentencepiece_available(): from .tokenization_deberta_v2 import DebertaV2Tokenizer else: DebertaV2Tokenizer = None logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "spm.model", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "microsoft/deberta-v2-xlarge": "https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/spm.model", "microsoft/deberta-v2-xxlarge": "https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/spm.model", "microsoft/deberta-v2-xlarge-mnli": ( "https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/spm.model" ), "microsoft/deberta-v2-xxlarge-mnli": ( "https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/spm.model" ), } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "microsoft/deberta-v2-xlarge": 512, "microsoft/deberta-v2-xxlarge": 512, "microsoft/deberta-v2-xlarge-mnli": 512, "microsoft/deberta-v2-xxlarge-mnli": 512, } PRETRAINED_INIT_CONFIGURATION = { "microsoft/deberta-v2-xlarge": {"do_lower_case": False}, "microsoft/deberta-v2-xxlarge": {"do_lower_case": False}, "microsoft/deberta-v2-xlarge-mnli": {"do_lower_case": False}, "microsoft/deberta-v2-xxlarge-mnli": {"do_lower_case": False}, } class DebertaV2TokenizerFast(PreTrainedTokenizerFast): r""" Constructs a DeBERTa-v2 fast tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. do_lower_case (`bool`, *optional*, defaults to `False`): Whether or not to lowercase the input when tokenizing. bos_token (`string`, *optional*, defaults to `"[CLS]"`): The beginning of sequence token that was used during pre-training. Can be used a sequence classifier token. When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. eos_token (`string`, *optional*, defaults to `"[SEP]"`): The end of sequence token. When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. unk_token (`str`, *optional*, defaults to `"[UNK]"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. 
two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. pad_token (`str`, *optional*, defaults to `"[PAD]"`): The token used for padding, for example when batching sequences of different lengths. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES slow_tokenizer_class = DebertaV2Tokenizer def __init__( self, vocab_file=None, tokenizer_file=None, do_lower_case=False, split_by_punct=False, bos_token="[CLS]", eos_token="[SEP]", unk_token="[UNK]", sep_token="[SEP]", pad_token="[PAD]", cls_token="[CLS]", mask_token="[MASK]", **kwargs, ) -> None: super().__init__( vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, split_by_punct=split_by_punct, **kwargs, ) self.do_lower_case = do_lower_case self.split_by_punct = split_by_punct self.vocab_file = vocab_file self.can_save_slow_tokenizer = False if not self.vocab_file else True def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None): """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A DeBERTa sequence has the following format: - single sequence: [CLS] X [SEP] - pair of sequences: [CLS] A [SEP] B [SEP] Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False): """ Retrieves sequence ids from a token list that has no special tokens added. 
This method is called when adding special tokens using the tokenizer `prepare_for_model` or `encode_plus` methods. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is not None: return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None): """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A DeBERTa sequence pair mask has the following format: ``` 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | ``` If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
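As a quick check of the pair layout documented in the tokenizer record above, here is a self-contained sketch of the `[CLS] A [SEP] B [SEP]` construction and the matching token type ids. The token ids are made up for illustration; they are not real DeBERTa-v2 vocabulary ids.

```python
# Reproduces the documented pair layout with dummy ids (cls=1, sep=2):
# input_ids:      [CLS] A  A  A [SEP] B  B [SEP]
# token_type_ids:   0   0  0  0   0   1  1   1
cls_id, sep_id = 1, 2
token_ids_0 = [10, 11, 12]   # first sequence
token_ids_1 = [20, 21]       # second sequence

input_ids = [cls_id] + token_ids_0 + [sep_id] + token_ids_1 + [sep_id]
token_type_ids = [0] * (len(token_ids_0) + 2) + [1] * (len(token_ids_1) + 1)

assert input_ids == [1, 10, 11, 12, 2, 20, 21, 2]
assert token_type_ids == [0, 0, 0, 0, 0, 1, 1, 1]
```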
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors with `//` is deprecated and should now be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/vit_hybrid/test_modeling_vit_hybrid.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch ViT Hybrid model. """ import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class ViTHybridModelTester: def __init__( self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, backbone_featmap_shape=[1, 16, 4, 4], scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.backbone_featmap_shape = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size num_patches = (self.image_size // 32) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): backbone_config = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, "hidden_sizes": [4, 8, 16, 32], "num_groups": 2, } return ViTHybridConfig( image_size=self.image_size, 
patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=backbone_config, ) def create_and_check_model(self, config, pixel_values, labels): model = ViTHybridModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = ViTHybridForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as ViT does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification} if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = ViTHybridModelTester(self) self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class 
in self.all_model_classes: model = model_class(config=configs_no_init) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @slow def test_model_from_pretrained(self): for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ViTHybridModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class ViTModelIntegrationTest(unittest.TestCase): @cached_property def default_feature_extractor(self): return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to( torch_device ) feature_extractor = self.default_feature_extractor image = prepare_img() inputs = feature_extractor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @slow @require_accelerate def test_accelerate_inference(self): feature_extractor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384") model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto") image = prepare_img() inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits # model predicts one of the 1000 ImageNet classes predicted_class_idx = logits.argmax(-1).item() self.assertTrue(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch ViT Hybrid model. """ import inspect import unittest from transformers import ViTHybridConfig from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class ViTHybridModelTester: def __init__( self, parent, batch_size=13, image_size=64, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, backbone_featmap_shape=[1, 16, 4, 4], scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.scope = scope self.backbone_featmap_shape = backbone_featmap_shape # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) # the number of patches is based on the feature map of the backbone, which by default uses an output stride # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size num_patches = (self.image_size // 32) ** 2 self.seq_length = num_patches + 1 def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): backbone_config = { "global_padding": "same", "layer_type": "bottleneck", "depths": [3, 4, 9], "out_features": ["stage1", "stage2", "stage3"], "embedding_dynamic_padding": True, "hidden_sizes": [4, 8, 16, 32], "num_groups": 2, } return ViTHybridConfig( image_size=self.image_size, 
patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, backbone_featmap_shape=self.backbone_featmap_shape, backbone_config=backbone_config, ) def create_and_check_model(self, config, pixel_values, labels): model = ViTHybridModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = ViTHybridForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as ViT does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else () pipeline_model_mapping = ( {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification} if is_torch_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = ViTHybridModelTester(self) self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="ViT does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class 
in self.all_model_classes: model = model_class(config=configs_no_init) # Skip the check for the backbone for name, module in model.named_modules(): if module.__class__.__name__ == "ViTHybridPatchEmbeddings": backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()] break for name, param in model.named_parameters(): if param.requires_grad: if name in backbone_params: continue self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized", ) @slow def test_model_from_pretrained(self): for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ViTHybridModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class ViTModelIntegrationTest(unittest.TestCase): @cached_property def default_feature_extractor(self): return ( ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]) if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to( torch_device ) feature_extractor = self.default_feature_extractor image = prepare_img() inputs = feature_extractor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4)) @slow @require_accelerate def test_accelerate_inference(self): feature_extractor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384") model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto") image = prepare_img() inputs = feature_extractor(images=image, return_tensors="pt") outputs = model(**inputs) logits = outputs.logits # model predicts one of the 1000 ImageNet classes predicted_class_idx = logits.argmax(-1).item() self.assertTrue(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
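The sequence length used by the ViT hybrid tester in the record above follows from simple arithmetic; a quick sketch with the tester's default `image_size=64` and the backbone output stride of 32 stated in its comment:

```python
# With image_size=64 and an effective output stride of 32, the backbone
# feature map is 2x2, giving 4 patches plus one [CLS] token.
image_size = 64
num_patches = (image_size // 32) ** 2   # (64 // 32) ** 2 == 4
seq_length = num_patches + 1            # 4 + 1 == 5
assert (num_patches, seq_length) == (4, 5)
```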
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors with `//` is deprecated and should now be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/mbart/configuration_mbart.py
# coding=utf-8 # Copyright 2021, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ MBART model configuration""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging logger = logging.get_logger(__name__) MBART_PRETRAINED_CONFIG_ARCHIVE_MAP = { "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/config.json", # See all MBART models at https://huggingface.co/models?filter=mbart } class MBartConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MBartModel`]. It is used to instantiate an MBART model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MBART [facebook/mbart-large-cc25](https://huggingface.co/facebook/mbart-large-cc25) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50265): Vocabulary size of the MBART model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MBartModel`] or [`TFMBartModel`]. d_model (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_layers (`int`, *optional*, defaults to 12): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. 
activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. classifier_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for classifier. max_position_embeddings (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. scale_embedding (`bool`, *optional*, defaults to `False`): Scale embeddings by diving by sqrt(d_model). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models) forced_eos_token_id (`int`, *optional*, defaults to 2): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. Example: ```python >>> from transformers import MBartConfig, MBartModel >>> # Initializing a MBART facebook/mbart-large-cc25 style configuration >>> configuration = MBartConfig() >>> # Initializing a model (with random weights) from the facebook/mbart-large-cc25 style configuration >>> model = MBartModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mbart" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, pad_token_id=1, bos_token_id=0, eos_token_id=2, forced_eos_token_id=2, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.classifier_dropout = classifier_dropout self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, forced_eos_token_id=forced_eos_token_id, **kwargs, ) # Copied from 
transformers.models.bart.configuration_bart.BartOnnxConfig with Bart->MBart class MBartOnnxConfig(OnnxSeq2SeqConfigWithPast): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: common_inputs = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: common_inputs["decoder_input_ids"] = {0: "batch"} common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"} else: common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"} common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(common_inputs, direction="inputs") elif self.task == "causal-lm": # TODO: figure this case out. common_inputs = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: num_encoder_layers, _ = self.num_layers for i in range(num_encoder_layers): common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"} common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"} else: common_inputs = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def outputs(self) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: common_outputs = super().outputs else: common_outputs = super(OnnxConfigWithPast, self).outputs if self.use_past: num_encoder_layers, _ = self.num_layers for i in range(num_encoder_layers): common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"} common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def _generate_dummy_inputs_for_default_and_seq2seq_lm( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size, seq_length, is_pair, framework ) # Generate decoder inputs decoder_seq_length = seq_length if not self.use_past else 1 decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size, decoder_seq_length, is_pair, framework ) decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} common_inputs = dict(**encoder_inputs, **decoder_inputs) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") else: import torch batch, encoder_seq_length = common_inputs["input_ids"].shape decoder_seq_length = common_inputs["decoder_input_ids"].shape[1] num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads encoder_shape = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) decoder_past_length = decoder_seq_length + 3 decoder_shape = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) common_inputs["decoder_attention_mask"] = torch.cat( 
[common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1 ) common_inputs["past_key_values"] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered num_encoder_layers, num_decoder_layers = self.num_layers min_num_layers = min(num_encoder_layers, num_decoder_layers) max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(min_num_layers): common_inputs["past_key_values"].append( ( torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape), ) ) # TODO: test this. shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(min_num_layers, max_num_layers): common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape))) return common_inputs def _generate_dummy_inputs_for_causal_lm( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size, seq_length, is_pair, framework ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") else: import torch batch, seqlen = common_inputs["input_ids"].shape # Not using the same length for past_key_values past_key_values_length = seqlen + 2 num_encoder_layers, _ = self.num_layers num_encoder_attention_heads, _ = self.num_attention_heads past_shape = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) mask_dtype = common_inputs["attention_mask"].dtype common_inputs["attention_mask"] = torch.cat( [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 ) common_inputs["past_key_values"] = [ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers) ] return common_inputs def _generate_dummy_inputs_for_sequence_classification_and_question_answering( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension( batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX token_to_add = tokenizer.num_special_tokens_to_add(is_pair) seq_length = compute_effective_axis_dimension( seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add ) # Generate dummy inputs according to compute batch and sequence dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size common_inputs = dict(tokenizer(dummy_input, return_tensors=framework)) return common_inputs def generate_dummy_inputs( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) elif self.task == "causal-lm": common_inputs = self._generate_dummy_inputs_for_causal_lm( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) else: common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) return common_inputs def _flatten_past_key_values_(self, flattened_output, name, idx, t): if self.task in ["default", "seq2seq-lm"]: flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t) else: flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_( flattened_output, name, idx, t )
# coding=utf-8 # Copyright 2021, The Facebook AI Research Team and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ MBART model configuration""" from collections import OrderedDict from typing import Any, Mapping, Optional from ... import PreTrainedTokenizer from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast from ...onnx.utils import compute_effective_axis_dimension from ...utils import TensorType, is_torch_available, logging logger = logging.get_logger(__name__) MBART_PRETRAINED_CONFIG_ARCHIVE_MAP = { "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/config.json", # See all MBART models at https://huggingface.co/models?filter=mbart } class MBartConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`MBartModel`]. It is used to instantiate an MBART model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the MBART [facebook/mbart-large-cc25](https://huggingface.co/facebook/mbart-large-cc25) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50265): Vocabulary size of the MBART model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`MBartModel`] or [`TFMBartModel`]. d_model (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. encoder_layers (`int`, *optional*, defaults to 12): Number of encoder layers. decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. encoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. encoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. 
activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. classifier_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for classifier. max_position_embeddings (`int`, *optional*, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. encoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. scale_embedding (`bool`, *optional*, defaults to `False`): Scale embeddings by diving by sqrt(d_model). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models) forced_eos_token_id (`int`, *optional*, defaults to 2): The id of the token to force as the last generated token when `max_length` is reached. Usually set to `eos_token_id`. Example: ```python >>> from transformers import MBartConfig, MBartModel >>> # Initializing a MBART facebook/mbart-large-cc25 style configuration >>> configuration = MBartConfig() >>> # Initializing a model (with random weights) from the facebook/mbart-large-cc25 style configuration >>> model = MBartModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "mbart" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"} def __init__( self, vocab_size=50265, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, classifier_dropout=0.0, scale_embedding=False, pad_token_id=1, bos_token_id=0, eos_token_id=2, forced_eos_token_id=2, **kwargs, ): self.vocab_size = vocab_size self.max_position_embeddings = max_position_embeddings self.d_model = d_model self.encoder_ffn_dim = encoder_ffn_dim self.encoder_layers = encoder_layers self.encoder_attention_heads = encoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.activation_function = activation_function self.init_std = init_std self.encoder_layerdrop = encoder_layerdrop self.decoder_layerdrop = decoder_layerdrop self.classifier_dropout = classifier_dropout self.use_cache = use_cache self.num_hidden_layers = encoder_layers self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, forced_eos_token_id=forced_eos_token_id, **kwargs, ) # Copied from 
transformers.models.bart.configuration_bart.BartOnnxConfig with Bart->MBart class MBartOnnxConfig(OnnxSeq2SeqConfigWithPast): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: common_inputs = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: common_inputs["decoder_input_ids"] = {0: "batch"} common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"} else: common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"} common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(common_inputs, direction="inputs") elif self.task == "causal-lm": # TODO: figure this case out. common_inputs = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ] ) if self.use_past: num_encoder_layers, _ = self.num_layers for i in range(num_encoder_layers): common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"} common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"} else: common_inputs = OrderedDict( [ ("input_ids", {0: "batch", 1: "encoder_sequence"}), ("attention_mask", {0: "batch", 1: "encoder_sequence"}), ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}), ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}), ] ) return common_inputs @property def outputs(self) -> Mapping[str, Mapping[int, str]]: if self.task in ["default", "seq2seq-lm"]: common_outputs = super().outputs else: common_outputs = super(OnnxConfigWithPast, self).outputs if self.use_past: num_encoder_layers, _ = self.num_layers for i in range(num_encoder_layers): common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"} common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"} return common_outputs def _generate_dummy_inputs_for_default_and_seq2seq_lm( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size, seq_length, is_pair, framework ) # Generate decoder inputs decoder_seq_length = seq_length if not self.use_past else 1 decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size, decoder_seq_length, is_pair, framework ) decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()} common_inputs = dict(**encoder_inputs, **decoder_inputs) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") else: import torch batch, encoder_seq_length = common_inputs["input_ids"].shape decoder_seq_length = common_inputs["decoder_input_ids"].shape[1] num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads encoder_shape = ( batch, num_encoder_attention_heads, encoder_seq_length, self._config.hidden_size // num_encoder_attention_heads, ) decoder_past_length = decoder_seq_length + 3 decoder_shape = ( batch, num_decoder_attention_heads, decoder_past_length, self._config.hidden_size // num_decoder_attention_heads, ) common_inputs["decoder_attention_mask"] = torch.cat( 
[common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1 ) common_inputs["past_key_values"] = [] # If the number of encoder and decoder layers are present in the model configuration, both are considered num_encoder_layers, num_decoder_layers = self.num_layers min_num_layers = min(num_encoder_layers, num_decoder_layers) max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder" for _ in range(min_num_layers): common_inputs["past_key_values"].append( ( torch.zeros(decoder_shape), torch.zeros(decoder_shape), torch.zeros(encoder_shape), torch.zeros(encoder_shape), ) ) # TODO: test this. shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape for _ in range(min_num_layers, max_num_layers): common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape))) return common_inputs def _generate_dummy_inputs_for_causal_lm( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size, seq_length, is_pair, framework ) if self.use_past: if not is_torch_available(): raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.") else: import torch batch, seqlen = common_inputs["input_ids"].shape # Not using the same length for past_key_values past_key_values_length = seqlen + 2 num_encoder_layers, _ = self.num_layers num_encoder_attention_heads, _ = self.num_attention_heads past_shape = ( batch, num_encoder_attention_heads, past_key_values_length, self._config.hidden_size // num_encoder_attention_heads, ) mask_dtype = common_inputs["attention_mask"].dtype common_inputs["attention_mask"] = torch.cat( [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1 ) common_inputs["past_key_values"] = [ (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers) ] return common_inputs def _generate_dummy_inputs_for_sequence_classification_and_question_answering( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: # Copied from OnnxConfig.generate_dummy_inputs # Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity. 
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX batch_size = compute_effective_axis_dimension( batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX token_to_add = tokenizer.num_special_tokens_to_add(is_pair) seq_length = compute_effective_axis_dimension( seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add ) # Generate dummy inputs according to compute batch and sequence dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size common_inputs = dict(tokenizer(dummy_input, return_tensors=framework)) return common_inputs def generate_dummy_inputs( self, tokenizer: PreTrainedTokenizer, batch_size: int = -1, seq_length: int = -1, is_pair: bool = False, framework: Optional[TensorType] = None, ) -> Mapping[str, Any]: if self.task in ["default", "seq2seq-lm"]: common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) elif self.task == "causal-lm": common_inputs = self._generate_dummy_inputs_for_causal_lm( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) else: common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering( tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework ) return common_inputs def _flatten_past_key_values_(self, flattened_output, name, idx, t): if self.task in ["default", "seq2seq-lm"]: flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t) else: flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_( flattened_output, name, idx, t )
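As a small, hedged illustration of how the `MBartOnnxConfig` dummy-input machinery above can be exercised (the `facebook/mbart-large-cc25` checkpoint comes from the config map in this file; the `"default"` task choice is an assumption for the sketch, not part of the patch):

```python
from transformers import AutoTokenizer, MBartConfig, TensorType
from transformers.models.mbart.configuration_mbart import MBartOnnxConfig

# Build an ONNX export config for the default (encoder-decoder) task
config = MBartConfig()
onnx_config = MBartOnnxConfig(config, task="default")

# Generate the dummy tensors used to trace the model for export
tokenizer = AutoTokenizer.from_pretrained("facebook/mbart-large-cc25")
dummy_inputs = onnx_config.generate_dummy_inputs(tokenizer, framework=TensorType.PYTORCH)

print(sorted(dummy_inputs.keys()))
# expected keys: attention_mask, decoder_attention_mask, decoder_input_ids, input_ids
```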
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
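A minimal sketch of the kind of change this PR applies; the tensor name and divisor below are illustrative, not taken from the patched files:

```python
import torch

positions = torch.arange(10)

# Deprecated: emits "__floordiv__ is deprecated" warnings on recent PyTorch versions
# block_ids = positions // 4

# Preferred replacement: explicit floor division via torch.div
block_ids = torch.div(positions, 4, rounding_mode="floor")
```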
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/biogpt/test_tokenization_biogpt.py
# coding=utf-8 # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = BioGptTokenizer test_rust_tokenizer = False def setUp(self): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w") as fp: fp.write(json.dumps(vocab_tokens)) with open(self.merges_file, "w") as fp: fp.write("\n".join(merges)) def get_input_output_texts(self, tokenizer): input_text = "lower newer" output_text = "lower newer" return input_text, output_text def test_full_tokenizer(self): """Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt""" tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file) text = "lower" bpe_tokens = ["low", "er</w>"] tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + ["<unk>"] input_bpe_tokens = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) @slow def test_sequence_builders(self): tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt") text = tokenizer.encode("sequence builders", add_special_tokens=False) text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) self.assertTrue(encoded_sentence == [2] + text) self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
# coding=utf-8 # Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = BioGptTokenizer test_rust_tokenizer = False def setUp(self): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w") as fp: fp.write(json.dumps(vocab_tokens)) with open(self.merges_file, "w") as fp: fp.write("\n".join(merges)) def get_input_output_texts(self, tokenizer): input_text = "lower newer" output_text = "lower newer" return input_text, output_text def test_full_tokenizer(self): """Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt""" tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file) text = "lower" bpe_tokens = ["low", "er</w>"] tokens = tokenizer.tokenize(text) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + ["<unk>"] input_bpe_tokens = [14, 15, 20] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) @slow def test_sequence_builders(self): tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt") text = tokenizer.encode("sequence builders", add_special_tokens=False) text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) self.assertTrue(encoded_sentence == [2] + text) self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/bartpho/tokenization_bartpho.py
# coding=utf-8 # Copyright 2021 VinAI Research and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License """ Tokenization classes for BARTpho-syllable model.""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) SPIECE_UNDERLINE = "▁" VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model", }, "monolingual_vocab_file": { "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024} class BartphoTokenizer(PreTrainedTokenizer): """ Adapted from [`XLMRobertaTokenizer`]. Based on [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. This vocabulary is the pre-trained SentencePiece model available from the multilingual XLM-RoBERTa, also used in mBART, consisting of 250K types. monolingual_vocab_file (`str`): Path to the monolingual vocabulary file. This monolingual vocabulary consists of Vietnamese-specialized types extracted from the multilingual vocabulary vocab_file of 250K types. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. 
pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`): Additional special tokens used by the tokenizer. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. Attributes: sp_model (`SentencePieceProcessor`): The *SentencePiece* processor that is used for every conversion (string, tokens and IDs). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, monolingual_vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None: # Mask token behave like a normal word, i.e. 
include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, ) self.vocab_file = vocab_file self.monolingual_vocab_file = monolingual_vocab_file self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(vocab_file)) # Load the reduced vocab # Keep order of special tokens for backward compatibility self.fairseq_tokens_to_ids = {} cnt = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(token) not in self.fairseq_tokens_to_ids: self.fairseq_tokens_to_ids[str(token)] = cnt cnt += 1 with open(monolingual_vocab_file, "r", encoding="utf-8") as f: for line in f.readlines(): token = line.strip().split()[0] self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids) if str(mask_token) not in self.fairseq_tokens_to_ids: self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids) self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None state["sp_model_proto"] = self.sp_model.serialized_model_proto() return state def __setstate__(self, d): self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An BARTPho sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. BARTPho does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] @property def vocab_size(self): return len(self.fairseq_ids_to_tokens) def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text: str) -> List[str]: return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.fairseq_ids_to_tokens[index] def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (strings for sub-words) in a single string.""" out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip() return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) out_monolingual_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"], ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath( out_monolingual_vocab_file ) and os.path.isfile(self.monolingual_vocab_file): copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file) elif not os.path.isfile(self.monolingual_vocab_file): with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f"{str(token)} \n") return out_vocab_file, out_monolingual_vocab_file
# coding=utf-8 # Copyright 2021 VinAI Research and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License """ Tokenization classes for BARTpho-syllable model.""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) SPIECE_UNDERLINE = "▁" VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "monolingual_vocab_file": "dict.txt"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model", }, "monolingual_vocab_file": { "vinai/bartpho-syllable": "https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"vinai/bartpho-syllable": 1024} class BartphoTokenizer(PreTrainedTokenizer): """ Adapted from [`XLMRobertaTokenizer`]. Based on [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. This vocabulary is the pre-trained SentencePiece model available from the multilingual XLM-RoBERTa, also used in mBART, consisting of 250K types. monolingual_vocab_file (`str`): Path to the monolingual vocabulary file. This monolingual vocabulary consists of Vietnamese-specialized types extracted from the multilingual vocabulary vocab_file of 250K types. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. 
pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`): Additional special tokens used by the tokenizer. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. Attributes: sp_model (`SentencePieceProcessor`): The *SentencePiece* processor that is used for every conversion (string, tokens and IDs). """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, monolingual_vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None: # Mask token behave like a normal word, i.e. 
include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, ) self.vocab_file = vocab_file self.monolingual_vocab_file = monolingual_vocab_file self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(str(vocab_file)) # Load the reduced vocab # Keep order of special tokens for backward compatibility self.fairseq_tokens_to_ids = {} cnt = 0 for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]: if str(token) not in self.fairseq_tokens_to_ids: self.fairseq_tokens_to_ids[str(token)] = cnt cnt += 1 with open(monolingual_vocab_file, "r", encoding="utf-8") as f: for line in f.readlines(): token = line.strip().split()[0] self.fairseq_tokens_to_ids[token] = len(self.fairseq_tokens_to_ids) if str(mask_token) not in self.fairseq_tokens_to_ids: self.fairseq_tokens_to_ids[str(mask_token)] = len(self.fairseq_tokens_to_ids) self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None state["sp_model_proto"] = self.sp_model.serialized_model_proto() return state def __setstate__(self, d): self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.LoadFromSerializedProto(self.sp_model_proto) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An BARTPho sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. BARTPho does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] @property def vocab_size(self): return len(self.fairseq_ids_to_tokens) def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def _tokenize(self, text: str) -> List[str]: return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] else: return self.unk_token_id def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.fairseq_ids_to_tokens[index] def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (strings for sub-words) in a single string.""" out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip() return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) out_monolingual_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["monolingual_vocab_file"], ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) if os.path.abspath(self.monolingual_vocab_file) != os.path.abspath( out_monolingual_vocab_file ) and os.path.isfile(self.monolingual_vocab_file): copyfile(self.monolingual_vocab_file, out_monolingual_vocab_file) elif not os.path.isfile(self.monolingual_vocab_file): with open(out_monolingual_vocab_file, "w", encoding="utf-8") as fp: for token in self.fairseq_tokens_to_ids: if token not in self.all_special_tokens: fp.write(f"{str(token)} \n") return out_vocab_file, out_monolingual_vocab_file
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./examples/research_projects/longform-qa/eli5_app.py
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from eli5_utils import ( embed_questions_for_retrieval, make_qa_s2s_model, qa_s2s_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer MODEL_TYPE = "bart" LOAD_DENSE_INDEX = True @st.cache(allow_output_mutation=True) def load_models(): if LOAD_DENSE_INDEX: qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased") qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0") _ = qar_model.eval() else: qar_tokenizer, qar_model = (None, None) if MODEL_TYPE == "bart": s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5") s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0") save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth") s2s_model.load_state_dict(save_dict["model"]) _ = s2s_model.eval() else: s2s_tokenizer, s2s_model = make_qa_s2s_model( model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0" ) return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model) @st.cache(allow_output_mutation=True) def load_indexes(): if LOAD_DENSE_INDEX: faiss_res = faiss.StandardGpuResources() wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"] wiki40b_passage_reps = np.memmap( "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat", dtype="float32", mode="r", shape=(wiki40b_passages.num_rows, 128), ) wiki40b_index_flat = faiss.IndexFlatIP(128) wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat) wiki40b_gpu_index_flat.add(wiki40b_passage_reps) # TODO fix for larger GPU else: wiki40b_passages, wiki40b_gpu_index_flat = (None, None) es_client = Elasticsearch([{"host": "localhost", "port": "9200"}]) return (wiki40b_passages, wiki40b_gpu_index_flat, es_client) @st.cache(allow_output_mutation=True) def load_train_data(): eli5 = datasets.load_dataset("eli5", name="LFQA_reddit") eli5_train = eli5["train_eli5"] eli5_train_q_reps = np.memmap( "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128) ) eli5_train_q_index = faiss.IndexFlatIP(128) eli5_train_q_index.add(eli5_train_q_reps) return (eli5_train, eli5_train_q_index) passages, gpu_dense_index, es_client = load_indexes() qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models() eli5_train, eli5_train_q_index = load_train_data() def find_nearest_training(question, n_results=10): q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model) D, I = eli5_train_q_index.search(q_rep, n_results) nn_examples = [eli5_train[int(i)] for i in I[0]] return nn_examples def make_support(question, source="wiki40b", method="dense", n_results=10): if source == "none": support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), []) else: if method == "dense": support_doc, hit_lst = query_qa_dense_index( question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results ) else: support_doc, hit_lst = query_es_index( question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results, ) support_list = [ (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst ] question_doc = "question: {} context: {}".format(question, support_doc) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda _: None), 
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None), } ) def answer_question( question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8 ): with torch.no_grad(): answer = qa_s2s_generate( question_doc, s2s_model, s2s_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1024, device="cuda:0", )[0] return (answer, support_list) st.title("Long Form Question Answering with ELI5") # Start sidebar header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>" header_full = """ <html> <head> <style> .img-container { padding-left: 90px; padding-right: 90px; padding-top: 50px; padding-bottom: 50px; background-color: #f0f3f9; } </style> </head> <body> <span class="img-container"> <!-- Inline parent element --> %s </span> </body> </html> """ % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia description = """ This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html). First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset, a pre-processed fixed snapshot of Wikipedia. """ st.sidebar.markdown(description, unsafe_allow_html=True) action_list = [ "Answer the question", "View the retrieved document only", "View the most similar ELI5 question and answer", "Show me everything, please!", ] demo_options = st.sidebar.checkbox("Demo options") if demo_options: action_st = st.sidebar.selectbox( "", action_list, index=3, ) action = action_list.index(action_st) show_type = st.sidebar.selectbox( "", ["Show full text of passages", "Show passage section titles"], index=0, ) show_passages = show_type == "Show full text of passages" else: action = 3 show_passages = True retrieval_options = st.sidebar.checkbox("Retrieval options") if retrieval_options: retriever_info = """ ### Information retriever options The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs. The answer is then generated by sequence to sequence model which takes the question and retrieved document as input. """ st.sidebar.markdown(retriever_info) wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"]) index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"]) else: wiki_source = "wiki40b" index_type = "dense" sampled = "beam" n_beams = 2 min_len = 64 max_len = 256 top_p = None temp = None generate_options = st.sidebar.checkbox("Generation options") if generate_options: generate_info = """ ### Answer generation options The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large) weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with **beam** search, or **sample** from the decoder's output probabilities. 
""" st.sidebar.markdown(generate_info) sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"]) min_len = st.sidebar.slider( "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) max_len = st.sidebar.slider( "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: top_p = st.sidebar.slider( "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) temp = st.sidebar.slider( "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) n_beams = None # start main text questions_list = [ "<MY QUESTION>", "How do people make chocolate?", "Why do we get a fever when we are sick?", "How can different animals perceive different colors?", "What is natural language processing?", "What's the best way to treat a sunburn?", "What exactly are vitamins ?", "How does nuclear energy provide electricity?", "What's the difference between viruses and bacteria?", "Why are flutes classified as woodwinds when most of them are made out of metal ?", "Why do people like drinking coffee even though it tastes so bad?", "What happens when wine ages? How does it make the wine taste better?", "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?", "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?", "How does New Zealand have so many large bird predators?", ] question_s = st.selectbox( "What would you like to ask? ---- select <MY QUESTION> to enter a new query", questions_list, index=1, ) if question_s == "<MY QUESTION>": question = st.text_input("Enter your question here:", "") else: question = question_s if st.button("Show me!"): if action in [0, 1, 3]: if index_type == "mixed": _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10) _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10) support_list = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] support_list = support_list[:10] question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list]) else: question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: answer, support_list = answer_question( question_doc, s2s_model, s2s_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == "sampled"), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("### The model generated answer is:") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:") for i, res in enumerate(support_list): wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_")) sec_titles = res[1].strip() if sec_titles == "": sections = "[{}]({})".format(res[0], wiki_url) else: sec_list = sec_titles.split(" & ") sections = " & ".join( ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list] ) st.markdown( "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i 
+ 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True ) if action in [2, 3]: nn_train_list = find_nearest_training(question) train_exple = nn_train_list[0] st.markdown( "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"]) ) answers_st = [ "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""])) for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"])) if i == 0 or sc > 2 ] st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st))) disclaimer = """ --- **Disclaimer** *The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system. Evaluating biases of such a model and ensuring factual generations are still very much open research problems. Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.* """ st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
import datasets import faiss import numpy as np import streamlit as st import torch from elasticsearch import Elasticsearch from eli5_utils import ( embed_questions_for_retrieval, make_qa_s2s_model, qa_s2s_generate, query_es_index, query_qa_dense_index, ) import transformers from transformers import AutoModel, AutoModelForSeq2SeqLM, AutoTokenizer MODEL_TYPE = "bart" LOAD_DENSE_INDEX = True @st.cache(allow_output_mutation=True) def load_models(): if LOAD_DENSE_INDEX: qar_tokenizer = AutoTokenizer.from_pretrained("yjernite/retribert-base-uncased") qar_model = AutoModel.from_pretrained("yjernite/retribert-base-uncased").to("cuda:0") _ = qar_model.eval() else: qar_tokenizer, qar_model = (None, None) if MODEL_TYPE == "bart": s2s_tokenizer = AutoTokenizer.from_pretrained("yjernite/bart_eli5") s2s_model = AutoModelForSeq2SeqLM.from_pretrained("yjernite/bart_eli5").to("cuda:0") save_dict = torch.load("seq2seq_models/eli5_bart_model_blm_2.pth") s2s_model.load_state_dict(save_dict["model"]) _ = s2s_model.eval() else: s2s_tokenizer, s2s_model = make_qa_s2s_model( model_name="t5-small", from_file="seq2seq_models/eli5_t5_model_1024_4.pth", device="cuda:0" ) return (qar_tokenizer, qar_model, s2s_tokenizer, s2s_model) @st.cache(allow_output_mutation=True) def load_indexes(): if LOAD_DENSE_INDEX: faiss_res = faiss.StandardGpuResources() wiki40b_passages = datasets.load_dataset(path="wiki_snippets", name="wiki40b_en_100_0")["train"] wiki40b_passage_reps = np.memmap( "wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat", dtype="float32", mode="r", shape=(wiki40b_passages.num_rows, 128), ) wiki40b_index_flat = faiss.IndexFlatIP(128) wiki40b_gpu_index_flat = faiss.index_cpu_to_gpu(faiss_res, 1, wiki40b_index_flat) wiki40b_gpu_index_flat.add(wiki40b_passage_reps) # TODO fix for larger GPU else: wiki40b_passages, wiki40b_gpu_index_flat = (None, None) es_client = Elasticsearch([{"host": "localhost", "port": "9200"}]) return (wiki40b_passages, wiki40b_gpu_index_flat, es_client) @st.cache(allow_output_mutation=True) def load_train_data(): eli5 = datasets.load_dataset("eli5", name="LFQA_reddit") eli5_train = eli5["train_eli5"] eli5_train_q_reps = np.memmap( "eli5_questions_reps.dat", dtype="float32", mode="r", shape=(eli5_train.num_rows, 128) ) eli5_train_q_index = faiss.IndexFlatIP(128) eli5_train_q_index.add(eli5_train_q_reps) return (eli5_train, eli5_train_q_index) passages, gpu_dense_index, es_client = load_indexes() qar_tokenizer, qar_model, s2s_tokenizer, s2s_model = load_models() eli5_train, eli5_train_q_index = load_train_data() def find_nearest_training(question, n_results=10): q_rep = embed_questions_for_retrieval([question], qar_tokenizer, qar_model) D, I = eli5_train_q_index.search(q_rep, n_results) nn_examples = [eli5_train[int(i)] for i in I[0]] return nn_examples def make_support(question, source="wiki40b", method="dense", n_results=10): if source == "none": support_doc, hit_lst = (" <P> ".join(["" for _ in range(11)]).strip(), []) else: if method == "dense": support_doc, hit_lst = query_qa_dense_index( question, qar_model, qar_tokenizer, passages, gpu_dense_index, n_results ) else: support_doc, hit_lst = query_es_index( question, es_client, index_name="english_wiki40b_snippets_100w", n_results=n_results, ) support_list = [ (res["article_title"], res["section_title"].strip(), res["score"], res["passage_text"]) for res in hit_lst ] question_doc = "question: {} context: {}".format(question, support_doc) return question_doc, support_list @st.cache( hash_funcs={ torch.Tensor: (lambda _: None), 
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _: None), } ) def answer_question( question_doc, s2s_model, s2s_tokenizer, min_len=64, max_len=256, sampling=False, n_beams=2, top_p=0.95, temp=0.8 ): with torch.no_grad(): answer = qa_s2s_generate( question_doc, s2s_model, s2s_tokenizer, num_answers=1, num_beams=n_beams, min_len=min_len, max_len=max_len, do_sample=sampling, temp=temp, top_p=top_p, top_k=None, max_input_length=1024, device="cuda:0", )[0] return (answer, support_list) st.title("Long Form Question Answering with ELI5") # Start sidebar header_html = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>" header_full = """ <html> <head> <style> .img-container { padding-left: 90px; padding-right: 90px; padding-top: 50px; padding-bottom: 50px; background-color: #f0f3f9; } </style> </head> <body> <span class="img-container"> <!-- Inline parent element --> %s </span> </body> </html> """ % ( header_html, ) st.sidebar.markdown( header_full, unsafe_allow_html=True, ) # Long Form QA with ELI5 and Wikipedia description = """ This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html). First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset, a pre-processed fixed snapshot of Wikipedia. """ st.sidebar.markdown(description, unsafe_allow_html=True) action_list = [ "Answer the question", "View the retrieved document only", "View the most similar ELI5 question and answer", "Show me everything, please!", ] demo_options = st.sidebar.checkbox("Demo options") if demo_options: action_st = st.sidebar.selectbox( "", action_list, index=3, ) action = action_list.index(action_st) show_type = st.sidebar.selectbox( "", ["Show full text of passages", "Show passage section titles"], index=0, ) show_passages = show_type == "Show full text of passages" else: action = 3 show_passages = True retrieval_options = st.sidebar.checkbox("Retrieval options") if retrieval_options: retriever_info = """ ### Information retriever options The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs. The answer is then generated by sequence to sequence model which takes the question and retrieved document as input. """ st.sidebar.markdown(retriever_info) wiki_source = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"]) index_type = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"]) else: wiki_source = "wiki40b" index_type = "dense" sampled = "beam" n_beams = 2 min_len = 64 max_len = 256 top_p = None temp = None generate_options = st.sidebar.checkbox("Generation options") if generate_options: generate_info = """ ### Answer generation options The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large) weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with **beam** search, or **sample** from the decoder's output probabilities. 
""" st.sidebar.markdown(generate_info) sampled = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"]) min_len = st.sidebar.slider( "Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None ) max_len = st.sidebar.slider( "Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None ) if sampled == "beam": n_beams = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None) else: top_p = st.sidebar.slider( "Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None ) temp = st.sidebar.slider( "Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None ) n_beams = None # start main text questions_list = [ "<MY QUESTION>", "How do people make chocolate?", "Why do we get a fever when we are sick?", "How can different animals perceive different colors?", "What is natural language processing?", "What's the best way to treat a sunburn?", "What exactly are vitamins ?", "How does nuclear energy provide electricity?", "What's the difference between viruses and bacteria?", "Why are flutes classified as woodwinds when most of them are made out of metal ?", "Why do people like drinking coffee even though it tastes so bad?", "What happens when wine ages? How does it make the wine taste better?", "If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?", "How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?", "How does New Zealand have so many large bird predators?", ] question_s = st.selectbox( "What would you like to ask? ---- select <MY QUESTION> to enter a new query", questions_list, index=1, ) if question_s == "<MY QUESTION>": question = st.text_input("Enter your question here:", "") else: question = question_s if st.button("Show me!"): if action in [0, 1, 3]: if index_type == "mixed": _, support_list_dense = make_support(question, source=wiki_source, method="dense", n_results=10) _, support_list_sparse = make_support(question, source=wiki_source, method="sparse", n_results=10) support_list = [] for res_d, res_s in zip(support_list_dense, support_list_sparse): if tuple(res_d) not in support_list: support_list += [tuple(res_d)] if tuple(res_s) not in support_list: support_list += [tuple(res_s)] support_list = support_list[:10] question_doc = "<P> " + " <P> ".join([res[-1] for res in support_list]) else: question_doc, support_list = make_support(question, source=wiki_source, method=index_type, n_results=10) if action in [0, 3]: answer, support_list = answer_question( question_doc, s2s_model, s2s_tokenizer, min_len=min_len, max_len=int(max_len), sampling=(sampled == "sampled"), n_beams=n_beams, top_p=top_p, temp=temp, ) st.markdown("### The model generated answer is:") st.write(answer) if action in [0, 1, 3] and wiki_source != "none": st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:") for i, res in enumerate(support_list): wiki_url = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_")) sec_titles = res[1].strip() if sec_titles == "": sections = "[{}]({})".format(res[0], wiki_url) else: sec_list = sec_titles.split(" & ") sections = " & ".join( ["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list] ) st.markdown( "{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i 
+ 1, res[0], sections), unsafe_allow_html=True, ) if show_passages: st.write( '> <span style="font-family:arial; font-size:10pt;">' + res[-1] + "</span>", unsafe_allow_html=True ) if action in [2, 3]: nn_train_list = find_nearest_training(question) train_exple = nn_train_list[0] st.markdown( "--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"]) ) answers_st = [ "{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""])) for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"])) if i == 0 or sc > 2 ] st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st))) disclaimer = """ --- **Disclaimer** *The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system. Evaluating biases of such a model and ensuring factual generations are still very much open research problems. Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.* """ st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing a torch tensor using `//` is no longer supported and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/layoutxlm/__init__.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, is_vision_available, ) _import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"] if TYPE_CHECKING: from .processing_layoutxlm import LayoutXLMProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm import LayoutXLMTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing a torch tensor using `//` is no longer supported and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/convert_tf_hub_seq_to_seq_bert_to_pytorch.py
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Seq2Seq TF Hub checkpoint.""" import argparse from . import ( BertConfig, BertGenerationConfig, BertGenerationDecoder, BertGenerationEncoder, load_tf_weights_in_bert_generation, logging, ) logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_hub_path, pytorch_dump_path, is_encoder_named_decoder, vocab_size, is_encoder): # Initialise PyTorch model bert_config = BertConfig.from_pretrained( "bert-large-cased", vocab_size=vocab_size, max_position_embeddings=512, is_decoder=True, add_cross_attention=True, ) bert_config_dict = bert_config.to_dict() del bert_config_dict["type_vocab_size"] config = BertGenerationConfig(**bert_config_dict) if is_encoder: model = BertGenerationEncoder(config) else: model = BertGenerationDecoder(config) print(f"Building PyTorch model from configuration: {config}") # Load weights from tf checkpoint load_tf_weights_in_bert_generation( model, tf_hub_path, model_class="bert", is_encoder_named_decoder=is_encoder_named_decoder, is_encoder=is_encoder, ) # Save pytorch-model print(f"Save PyTorch model and config to {pytorch_dump_path}") model.save_pretrained(pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_hub_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--is_encoder_named_decoder", action="store_true", help="If decoder has to be renamed to encoder in PyTorch model.", ) parser.add_argument("--is_encoder", action="store_true", help="If model is an encoder.") parser.add_argument("--vocab_size", default=50358, type=int, help="Vocab size of model") args = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_hub_path, args.pytorch_dump_path, args.is_encoder_named_decoder, args.vocab_size, is_encoder=args.is_encoder, )
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Seq2Seq TF Hub checkpoint.""" import argparse from . import ( BertConfig, BertGenerationConfig, BertGenerationDecoder, BertGenerationEncoder, load_tf_weights_in_bert_generation, logging, ) logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_hub_path, pytorch_dump_path, is_encoder_named_decoder, vocab_size, is_encoder): # Initialise PyTorch model bert_config = BertConfig.from_pretrained( "bert-large-cased", vocab_size=vocab_size, max_position_embeddings=512, is_decoder=True, add_cross_attention=True, ) bert_config_dict = bert_config.to_dict() del bert_config_dict["type_vocab_size"] config = BertGenerationConfig(**bert_config_dict) if is_encoder: model = BertGenerationEncoder(config) else: model = BertGenerationDecoder(config) print(f"Building PyTorch model from configuration: {config}") # Load weights from tf checkpoint load_tf_weights_in_bert_generation( model, tf_hub_path, model_class="bert", is_encoder_named_decoder=is_encoder_named_decoder, is_encoder=is_encoder, ) # Save pytorch-model print(f"Save PyTorch model and config to {pytorch_dump_path}") model.save_pretrained(pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_hub_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--is_encoder_named_decoder", action="store_true", help="If decoder has to be renamed to encoder in PyTorch model.", ) parser.add_argument("--is_encoder", action="store_true", help="If model is an encoder.") parser.add_argument("--vocab_size", default=50358, type=int, help="Vocab size of model") args = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_hub_path, args.pytorch_dump_path, args.is_encoder_named_decoder, args.vocab_size, is_encoder=args.is_encoder, )
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing a torch tensor using `//` is no longer supported and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/swin2sr/__init__.py
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing a torch tensor using `//` is no longer supported and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/utils/dummy_detectron2_objects.py
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import requires_backends LAYOUTLM_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None class LayoutLMv2Model: def __init__(self, *args, **kwargs): requires_backends(self, ["detectron2"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["detectron2"])
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import requires_backends LAYOUTLM_V2_PRETRAINED_MODEL_ARCHIVE_LIST = None class LayoutLMv2Model: def __init__(self, *args, **kwargs): requires_backends(self, ["detectron2"]) @classmethod def from_pretrained(cls, *args, **kwargs): requires_backends(cls, ["detectron2"])
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing a torch tensor using `//` is no longer supported and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/sew_d/__init__.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {"configuration_sew_d": ["SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWDConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_sew_d"] = [ "SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST", "SEWDForCTC", "SEWDForSequenceClassification", "SEWDModel", "SEWDPreTrainedModel", ] if TYPE_CHECKING: from .configuration_sew_d import SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWDConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew_d import ( SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST, SEWDForCTC, SEWDForSequenceClassification, SEWDModel, SEWDPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {"configuration_sew_d": ["SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWDConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_sew_d"] = [ "SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST", "SEWDForCTC", "SEWDForSequenceClassification", "SEWDModel", "SEWDPreTrainedModel", ] if TYPE_CHECKING: from .configuration_sew_d import SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWDConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew_d import ( SEW_D_PRETRAINED_MODEL_ARCHIVE_LIST, SEWDForCTC, SEWDForSequenceClassification, SEWDModel, SEWDPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing a torch tensor using `//` is no longer supported and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/blenderbot_small/test_modeling_flax_blenderbot_small.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotSmallConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform" import jax import jax.numpy as jnp from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, shift_tokens_right, ) def prepare_blenderbot_inputs_dict( config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = np.where(input_ids != config.pad_token_id, 1, 0) if decoder_attention_mask is None: decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0) if head_mask is None: head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class FlaxBlenderbotSmallModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.initializer_range = initializer_range def prepare_config_and_inputs(self): input_ids = 
np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size) input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1) decoder_input_ids = shift_tokens_right(input_ids, 1, 2) config = BlenderbotSmallConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, ) inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def check_use_cache_forward(self, model_class_name, config, inputs_dict): max_decoder_length = 20 model = model_class_name(config) encoder_outputs = model.encode(inputs_dict["input_ids"]) decoder_input_ids, decoder_attention_mask = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs) decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4") decoder_position_ids = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), ) outputs_cache = model.decode( decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, ) decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model.decode( decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, ) outputs = model.decode(decoder_input_ids, encoder_outputs) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict): max_decoder_length = 20 model = model_class_name(config) encoder_outputs = model.encode(inputs_dict["input_ids"]) decoder_input_ids, decoder_attention_mask = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) decoder_attention_mask_cache = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ], axis=-1, ) past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs) decoder_position_ids = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), ) outputs_cache = model.decode( decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, ) decoder_position_ids = 
jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model.decode( decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, ) outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") @require_flax class BlenderbotHeadTests(unittest.TestCase): vocab_size = 99 def _get_config_and_data(self): input_ids = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ], dtype=np.int64, ) batch_size = input_ids.shape[0] config = BlenderbotSmallConfig( vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) return config, input_ids, batch_size # @timeout_decorator.timeout(1) # not working with the decorator so far def test_lm_forward(self): config, input_ids, batch_size = self._get_config_and_data() lm_model = FlaxBlenderbotSmallForConditionalGeneration(config) outputs = lm_model(input_ids=input_ids) expected_shape = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["logits"].shape, expected_shape) def test_lm_uneven_forward(self): config = BlenderbotSmallConfig( vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, ) lm_model = FlaxBlenderbotSmallForConditionalGeneration(config) context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64) summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64) outputs = lm_model(input_ids=context, decoder_input_ids=summary) expected_shape = (*summary.shape, config.vocab_size) self.assertEqual(outputs["logits"].shape, expected_shape) def test_shift_tokens_right(self): input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64) shifted = shift_tokens_right(input_ids, 1, 2) n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum() n_pad_after = np.equal(shifted, 1).astype(np.float32).sum() self.assertEqual(shifted.shape, input_ids.shape) self.assertEqual(n_pad_after, n_pad_before - 1) self.assertTrue(np.equal(shifted[:, 0], 2).all()) @require_flax class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin): is_encoder_decoder = True all_model_classes = ( ( FlaxBlenderbotSmallModel, FlaxBlenderbotSmallForConditionalGeneration, ) if is_flax_available() else () ) all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else () def setUp(self): self.model_tester = FlaxBlenderbotSmallModelTester(self) def test_use_cache_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in 
self.all_model_classes: self.model_tester.check_use_cache_forward(model_class, config, inputs_dict) def test_use_cache_forward_with_attn_mask(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict) def test_encode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def encode_jitted(input_ids, attention_mask=None, **kwargs): return model.encode(input_ids=input_ids, attention_mask=attention_mask) with self.subTest("JIT Enabled"): jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = encode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_decode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): model = model_class(config) encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"]) prepared_inputs_dict = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs): return model.decode( decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, ) with self.subTest("JIT Enabled"): jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = decode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("facebook/blenderbot_small-90M") # FlaxBlenderbotForSequenceClassification expects eos token in input_ids input_ids = np.ones((1, 1)) * model.config.eos_token_id outputs = model(input_ids) self.assertIsNotNone(outputs)
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotSmallConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html os.environ["XLA_PYTHON_CLIENT_ALLOCATOR"] = "platform" import jax import jax.numpy as jnp from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, shift_tokens_right, ) def prepare_blenderbot_inputs_dict( config, input_ids, decoder_input_ids=None, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = np.where(input_ids != config.pad_token_id, 1, 0) if decoder_attention_mask is None: decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0) if head_mask is None: head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class FlaxBlenderbotSmallModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=32, eos_token_id=2, pad_token_id=1, bos_token_id=0, initializer_range=0.02, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.initializer_range = initializer_range def prepare_config_and_inputs(self): input_ids = 
np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size) input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1) decoder_input_ids = shift_tokens_right(input_ids, 1, 2) config = BlenderbotSmallConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, initializer_range=self.initializer_range, use_cache=False, ) inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def check_use_cache_forward(self, model_class_name, config, inputs_dict): max_decoder_length = 20 model = model_class_name(config) encoder_outputs = model.encode(inputs_dict["input_ids"]) decoder_input_ids, decoder_attention_mask = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs) decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4") decoder_position_ids = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), ) outputs_cache = model.decode( decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, ) decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model.decode( decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids, ) outputs = model.decode(decoder_input_ids, encoder_outputs) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict): max_decoder_length = 20 model = model_class_name(config) encoder_outputs = model.encode(inputs_dict["input_ids"]) decoder_input_ids, decoder_attention_mask = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) decoder_attention_mask_cache = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])), ], axis=-1, ) past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs) decoder_position_ids = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1), ) outputs_cache = model.decode( decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids, ) decoder_position_ids = 
jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4") outputs_cache_next = model.decode( decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids, ) outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask) diff = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))) self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}") @require_flax class BlenderbotHeadTests(unittest.TestCase): vocab_size = 99 def _get_config_and_data(self): input_ids = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ], dtype=np.int64, ) batch_size = input_ids.shape[0] config = BlenderbotSmallConfig( vocab_size=self.vocab_size, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) return config, input_ids, batch_size # @timeout_decorator.timeout(1) # not working with the decorator so far def test_lm_forward(self): config, input_ids, batch_size = self._get_config_and_data() lm_model = FlaxBlenderbotSmallForConditionalGeneration(config) outputs = lm_model(input_ids=input_ids) expected_shape = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["logits"].shape, expected_shape) def test_lm_uneven_forward(self): config = BlenderbotSmallConfig( vocab_size=self.vocab_size, d_model=14, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=8, decoder_ffn_dim=8, max_position_embeddings=48, ) lm_model = FlaxBlenderbotSmallForConditionalGeneration(config) context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64) summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64) outputs = lm_model(input_ids=context, decoder_input_ids=summary) expected_shape = (*summary.shape, config.vocab_size) self.assertEqual(outputs["logits"].shape, expected_shape) def test_shift_tokens_right(self): input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64) shifted = shift_tokens_right(input_ids, 1, 2) n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum() n_pad_after = np.equal(shifted, 1).astype(np.float32).sum() self.assertEqual(shifted.shape, input_ids.shape) self.assertEqual(n_pad_after, n_pad_before - 1) self.assertTrue(np.equal(shifted[:, 0], 2).all()) @require_flax class FlaxBlenderbotSmallModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin): is_encoder_decoder = True all_model_classes = ( ( FlaxBlenderbotSmallModel, FlaxBlenderbotSmallForConditionalGeneration, ) if is_flax_available() else () ) all_generative_model_classes = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else () def setUp(self): self.model_tester = FlaxBlenderbotSmallModelTester(self) def test_use_cache_forward(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in 
self.all_model_classes: self.model_tester.check_use_cache_forward(model_class, config, inputs_dict) def test_use_cache_forward_with_attn_mask(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict) def test_encode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def encode_jitted(input_ids, attention_mask=None, **kwargs): return model.encode(input_ids=input_ids, attention_mask=attention_mask) with self.subTest("JIT Enabled"): jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = encode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_decode(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): model = model_class(config) encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"]) prepared_inputs_dict = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs): return model.decode( decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, encoder_outputs=encoder_outputs, ) with self.subTest("JIT Enabled"): jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = decode_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) @slow def test_model_from_pretrained(self): for model_class_name in self.all_model_classes: model = model_class_name.from_pretrained("facebook/blenderbot_small-90M") # FlaxBlenderbotForSequenceClassification expects eos token in input_ids input_ids = np.ones((1, 1)) * model.config.eos_token_id outputs = model(input_ids) self.assertIsNotNone(outputs)
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing a torch tensor using `//` is no longer supported and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/megatron_bert/convert_megatron_bert_checkpoint.py
#################################################################################################### # Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #################################################################################################### # # Note: If when running this conversion script you're getting an exception: # ModuleNotFoundError: No module named 'megatron.model.enums' # you need to tell python where to find the clone of Megatron-LM, e.g.: # # cd /tmp # git clone https://github.com/NVIDIA/Megatron-LM # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_bert/convert_megatron_bert_checkpoint.py ... # # if you already have it cloned elsewhere, simply adjust the path to the existing path # # If the training was done using a Megatron-LM fork, e.g., # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one # in your path, i.e., /path/to/Megatron-DeepSpeed/ # import argparse import os import re import zipfile import torch from transformers import MegatronBertConfig #################################################################################################### def recursive_print(name, val, spaces=0): # Format the message. if name is None: msg = None else: fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}" msg = fmt.format(name) # Print and recurse (if needed). if isinstance(val, dict): if msg is not None: print(msg) for k in val.keys(): recursive_print(k, val[k], spaces + 2) elif isinstance(val, torch.Tensor): print(msg, ":", val.size()) else: print(msg, ":", val) def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size): # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :] # for compatibility with later versions of NVIDIA Megatron-LM. # The inverse operation is performed inside Megatron-LM to read checkpoints: # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209 # If param is the weight tensor of the self-attention block, the returned tensor # will have to be transposed one more time to be read by HuggingFace BERT. input_shape = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:] param = param.view(*saved_shape) param = param.transpose(0, 2) param = param.transpose(1, 2).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:] param = param.view(*saved_shape) param = param.transpose(0, 1).contiguous() param = param.view(*input_shape) return param #################################################################################################### def convert_megatron_checkpoint(args, input_state_dict, config): # The converted output model. 
output_state_dict = {} # old versions did not store training args ds_args = input_state_dict.get("args", None) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) config.tokenizer_type = ds_args.tokenizer_type config.vocab_size = ds_args.padded_vocab_size config.max_position_embeddings = ds_args.max_position_embeddings config.hidden_size = ds_args.hidden_size config.num_hidden_layers = ds_args.num_layers config.num_attention_heads = ds_args.num_attention_heads config.intermediate_size = ds_args.ffn_hidden_size if "ffn_hidden_size" in ds_args else 4 * ds_args.hidden_size # pprint(config) # The number of heads. heads = config.num_attention_heads # The hidden_size per head. hidden_size_per_head = config.hidden_size // heads # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): checkpoint_version = input_state_dict["checkpoint_version"] else: checkpoint_version = 0.0 # The model. model = input_state_dict["model"] # The language model. lm = model["language_model"] # The embeddings. embeddings = lm["embedding"] # The word embeddings. word_embeddings = embeddings["word_embeddings"]["weight"] # Truncate the embedding table to vocab_size rows. word_embeddings = word_embeddings[: config.vocab_size, :] # Store the word embeddings. output_state_dict["bert.embeddings.word_embeddings.weight"] = word_embeddings # The position embeddings. pos_embeddings = embeddings["position_embeddings"]["weight"] assert pos_embeddings.size(0) == config.max_position_embeddings and pos_embeddings.size(1) == config.hidden_size # Store the position embeddings. output_state_dict["bert.embeddings.position_embeddings.weight"] = pos_embeddings # The token-type embeddings. tokentype_embeddings = embeddings["tokentype_embeddings"]["weight"] # Store the position embeddings. output_state_dict["bert.embeddings.token_type_embeddings.weight"] = tokentype_embeddings # The transformer. transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"] # The regex to extract layer names. layer_re = re.compile("layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)") # The simple map of names for "automated" rules. megatron_to_transformers = { "attention.dense": ".attention.output.dense.", "self_attention.dense": ".attention.output.dense.", "mlp.dense_h_to_4h": ".intermediate.dense.", "mlp.dense_4h_to_h": ".output.dense.", } # Keep track of the attention/query/value tensor. attention_qkv_weight = None # Extract the layers. for key, val in transformer.items(): # Match the name. m = layer_re.match(key) # Stop if that's not a layer if m is None: break # The index of the layer. layer_idx = int(m.group(1)) # The name of the operation. op_name = m.group(2) # Is it a weight or a bias? weight_or_bias = m.group(3) # The name of the layer. layer_name = f"bert.encoder.layer.{layer_idx}" # For layernorm(s), simply store the layer norm. if op_name.endswith("layernorm"): ln_name = "attention.ln" if op_name.startswith("input") else "ln" output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Make sure the QKV pointer is nil. 
assert attention_qkv_weight is None, "" out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head) # Store the tensor as we need the bias as well to interleave QKV and biases. attention_qkv_weight = out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": # Make sure we read the weight tensor. assert attention_qkv_weight is not None, "" # Split the QKV matrix into Q, K and V. Megatron stores Q,K,V interleaved. q = attention_qkv_weight[0 * config.hidden_size : 1 * config.hidden_size, :] k = attention_qkv_weight[1 * config.hidden_size : 2 * config.hidden_size, :] v = attention_qkv_weight[2 * config.hidden_size : 3 * config.hidden_size, :] out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head) # Split the bias. q_bias = out_val[0 * config.hidden_size : 1 * config.hidden_size] k_bias = out_val[1 * config.hidden_size : 2 * config.hidden_size] v_bias = out_val[2 * config.hidden_size : 3 * config.hidden_size] # Store. output_state_dict[f"{layer_name}.attention.self.query.weight"] = q output_state_dict[f"{layer_name}.attention.self.query.bias"] = q_bias output_state_dict[f"{layer_name}.attention.self.key.weight"] = k output_state_dict[f"{layer_name}.attention.self.key.bias"] = k_bias output_state_dict[f"{layer_name}.attention.self.value.weight"] = v output_state_dict[f"{layer_name}.attention.self.value.bias"] = v_bias # Clear the stored tensor. attention_qkv_weight = None # Copy weights and biases as is. elif weight_or_bias in ["weight", "bias"]: out_name = megatron_to_transformers[op_name] output_state_dict[layer_name + out_name + weight_or_bias] = val # The final layernorm. output_state_dict["bert.encoder.ln.weight"] = transformer["final_layernorm.weight"] output_state_dict["bert.encoder.ln.bias"] = transformer["final_layernorm.bias"] # The pooler. pooler = lm["pooler"] # Store the matrix and the bias. output_state_dict["bert.pooler.dense.weight"] = pooler["dense.weight"] output_state_dict["bert.pooler.dense.bias"] = pooler["dense.bias"] # The LM head from Megatron (for RACE). lm_head = model["lm_head"] # The transform matrix. output_state_dict["cls.predictions.transform.dense.weight"] = lm_head["dense.weight"] output_state_dict["cls.predictions.transform.dense.bias"] = lm_head["dense.bias"] # The transform LN. output_state_dict["cls.predictions.transform.LayerNorm.weight"] = lm_head["layernorm.weight"] output_state_dict["cls.predictions.transform.LayerNorm.bias"] = lm_head["layernorm.bias"] # For the decoder, we replicate the weights. output_state_dict["cls.predictions.decoder.weight"] = word_embeddings output_state_dict["cls.predictions.bias"] = lm_head["bias"] # The classifier from Megatron (for MLNI). binary_head = model["binary_head"] # Store the classifier. output_state_dict["cls.seq_relationship.weight"] = binary_head["weight"] output_state_dict["cls.seq_relationship.bias"] = binary_head["bias"] # It should be done! return output_state_dict #################################################################################################### def main(): # Create the argument parser. 
parser = argparse.ArgumentParser() parser.add_argument("--print-checkpoint-structure", action="store_true") parser.add_argument("path_to_checkpoint", type=str, help="Path to the ZIP file containing the checkpoint") parser.add_argument( "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.", ) args = parser.parse_args() # Extract the basename. basename = os.path.dirname(args.path_to_checkpoint) # Load the model. # the .zip is very optional, let's keep it for backward compatibility print(f'Extracting PyTorch state dictionary from "{args.path_to_checkpoint}"') if args.path_to_checkpoint.endswith(".zip"): with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint: with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict: input_state_dict = torch.load(pytorch_dict, map_location="cpu") else: input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu") if args.config_file == "": # Default config of megatron-bert 345m config = MegatronBertConfig() # different megatron-bert-*-345m models have different vocab sizes, so override the default # config (which is for megatron-bert-cased-345m) with the actual vocab dimension config.vocab_size = input_state_dict["model"]["lm_head"]["bias"].numel() else: config = MegatronBertConfig.from_json_file(args.config_file) # Convert. print("Converting") output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config) # Print the structure of converted state dict. if args.print_checkpoint_structure: recursive_print(None, output_state_dict) # Store the config to file. print("Saving config") config.save_pretrained(basename) # Store the state_dict to file. output_checkpoint_file = os.path.join(basename, "pytorch_model.bin") print(f'Saving checkpoint to "{output_checkpoint_file}"') torch.save(output_state_dict, output_checkpoint_file) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
#################################################################################################### # Copyright (c) 2021-, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #################################################################################################### # # Note: If when running this conversion script you're getting an exception: # ModuleNotFoundError: No module named 'megatron.model.enums' # you need to tell python where to find the clone of Megatron-LM, e.g.: # # cd /tmp # git clone https://github.com/NVIDIA/Megatron-LM # PYTHONPATH=/tmp/Megatron-LM python src/transformers/models/megatron_bert/convert_megatron_bert_checkpoint.py ... # # if you already have it cloned elsewhere, simply adjust the path to the existing path # # If the training was done using a Megatron-LM fork, e.g., # https://github.com/microsoft/Megatron-DeepSpeed/ then chances are that you need to have that one # in your path, i.e., /path/to/Megatron-DeepSpeed/ # import argparse import os import re import zipfile import torch from transformers import MegatronBertConfig #################################################################################################### def recursive_print(name, val, spaces=0): # Format the message. if name is None: msg = None else: fmt = "." * max(0, spaces - 2) + "# {:" + str(50 - spaces) + "s}" msg = fmt.format(name) # Print and recurse (if needed). if isinstance(val, dict): if msg is not None: print(msg) for k in val.keys(): recursive_print(k, val[k], spaces + 2) elif isinstance(val, torch.Tensor): print(msg, ":", val.size()) else: print(msg, ":", val) def fix_query_key_value_ordering(param, checkpoint_version, num_splits, num_heads, hidden_size): # Permutes layout of param tensor to [num_splits * num_heads * hidden_size, :] # for compatibility with later versions of NVIDIA Megatron-LM. # The inverse operation is performed inside Megatron-LM to read checkpoints: # https://github.com/NVIDIA/Megatron-LM/blob/v2.4/megatron/checkpointing.py#L209 # If param is the weight tensor of the self-attention block, the returned tensor # will have to be transposed one more time to be read by HuggingFace BERT. input_shape = param.size() if checkpoint_version == 1.0: # version 1.0 stores [num_heads * hidden_size * num_splits, :] saved_shape = (num_heads, hidden_size, num_splits) + input_shape[1:] param = param.view(*saved_shape) param = param.transpose(0, 2) param = param.transpose(1, 2).contiguous() elif checkpoint_version >= 2.0: # other versions store [num_heads * num_splits * hidden_size, :] saved_shape = (num_heads, num_splits, hidden_size) + input_shape[1:] param = param.view(*saved_shape) param = param.transpose(0, 1).contiguous() param = param.view(*input_shape) return param #################################################################################################### def convert_megatron_checkpoint(args, input_state_dict, config): # The converted output model. 
output_state_dict = {} # old versions did not store training args ds_args = input_state_dict.get("args", None) if ds_args is not None: # do not make the user write a config file when the exact dimensions/sizes are already in the checkpoint # from pprint import pprint # pprint(vars(ds_args)) config.tokenizer_type = ds_args.tokenizer_type config.vocab_size = ds_args.padded_vocab_size config.max_position_embeddings = ds_args.max_position_embeddings config.hidden_size = ds_args.hidden_size config.num_hidden_layers = ds_args.num_layers config.num_attention_heads = ds_args.num_attention_heads config.intermediate_size = ds_args.ffn_hidden_size if "ffn_hidden_size" in ds_args else 4 * ds_args.hidden_size # pprint(config) # The number of heads. heads = config.num_attention_heads # The hidden_size per head. hidden_size_per_head = config.hidden_size // heads # Megatron-LM checkpoint version if "checkpoint_version" in input_state_dict.keys(): checkpoint_version = input_state_dict["checkpoint_version"] else: checkpoint_version = 0.0 # The model. model = input_state_dict["model"] # The language model. lm = model["language_model"] # The embeddings. embeddings = lm["embedding"] # The word embeddings. word_embeddings = embeddings["word_embeddings"]["weight"] # Truncate the embedding table to vocab_size rows. word_embeddings = word_embeddings[: config.vocab_size, :] # Store the word embeddings. output_state_dict["bert.embeddings.word_embeddings.weight"] = word_embeddings # The position embeddings. pos_embeddings = embeddings["position_embeddings"]["weight"] assert pos_embeddings.size(0) == config.max_position_embeddings and pos_embeddings.size(1) == config.hidden_size # Store the position embeddings. output_state_dict["bert.embeddings.position_embeddings.weight"] = pos_embeddings # The token-type embeddings. tokentype_embeddings = embeddings["tokentype_embeddings"]["weight"] # Store the position embeddings. output_state_dict["bert.embeddings.token_type_embeddings.weight"] = tokentype_embeddings # The transformer. transformer = lm["transformer"] if "transformer" in lm.keys() else lm["encoder"] # The regex to extract layer names. layer_re = re.compile("layers\.(\d+)\.([a-z0-9_.]+)\.([a-z]+)") # The simple map of names for "automated" rules. megatron_to_transformers = { "attention.dense": ".attention.output.dense.", "self_attention.dense": ".attention.output.dense.", "mlp.dense_h_to_4h": ".intermediate.dense.", "mlp.dense_4h_to_h": ".output.dense.", } # Keep track of the attention/query/value tensor. attention_qkv_weight = None # Extract the layers. for key, val in transformer.items(): # Match the name. m = layer_re.match(key) # Stop if that's not a layer if m is None: break # The index of the layer. layer_idx = int(m.group(1)) # The name of the operation. op_name = m.group(2) # Is it a weight or a bias? weight_or_bias = m.group(3) # The name of the layer. layer_name = f"bert.encoder.layer.{layer_idx}" # For layernorm(s), simply store the layer norm. if op_name.endswith("layernorm"): ln_name = "attention.ln" if op_name.startswith("input") else "ln" output_state_dict[layer_name + "." + ln_name + "." + weight_or_bias] = val # Transpose the QKV matrix. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "weight": # Make sure the QKV pointer is nil. 
assert attention_qkv_weight is None, "" out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head) # Store the tensor as we need the bias as well to interleave QKV and biases. attention_qkv_weight = out_val # Transpose the bias. elif ( op_name == "attention.query_key_value" or op_name == "self_attention.query_key_value" ) and weight_or_bias == "bias": # Make sure we read the weight tensor. assert attention_qkv_weight is not None, "" # Split the QKV matrix into Q, K and V. Megatron stores Q,K,V interleaved. q = attention_qkv_weight[0 * config.hidden_size : 1 * config.hidden_size, :] k = attention_qkv_weight[1 * config.hidden_size : 2 * config.hidden_size, :] v = attention_qkv_weight[2 * config.hidden_size : 3 * config.hidden_size, :] out_val = fix_query_key_value_ordering(val, checkpoint_version, 3, heads, hidden_size_per_head) # Split the bias. q_bias = out_val[0 * config.hidden_size : 1 * config.hidden_size] k_bias = out_val[1 * config.hidden_size : 2 * config.hidden_size] v_bias = out_val[2 * config.hidden_size : 3 * config.hidden_size] # Store. output_state_dict[f"{layer_name}.attention.self.query.weight"] = q output_state_dict[f"{layer_name}.attention.self.query.bias"] = q_bias output_state_dict[f"{layer_name}.attention.self.key.weight"] = k output_state_dict[f"{layer_name}.attention.self.key.bias"] = k_bias output_state_dict[f"{layer_name}.attention.self.value.weight"] = v output_state_dict[f"{layer_name}.attention.self.value.bias"] = v_bias # Clear the stored tensor. attention_qkv_weight = None # Copy weights and biases as is. elif weight_or_bias in ["weight", "bias"]: out_name = megatron_to_transformers[op_name] output_state_dict[layer_name + out_name + weight_or_bias] = val # The final layernorm. output_state_dict["bert.encoder.ln.weight"] = transformer["final_layernorm.weight"] output_state_dict["bert.encoder.ln.bias"] = transformer["final_layernorm.bias"] # The pooler. pooler = lm["pooler"] # Store the matrix and the bias. output_state_dict["bert.pooler.dense.weight"] = pooler["dense.weight"] output_state_dict["bert.pooler.dense.bias"] = pooler["dense.bias"] # The LM head from Megatron (for RACE). lm_head = model["lm_head"] # The transform matrix. output_state_dict["cls.predictions.transform.dense.weight"] = lm_head["dense.weight"] output_state_dict["cls.predictions.transform.dense.bias"] = lm_head["dense.bias"] # The transform LN. output_state_dict["cls.predictions.transform.LayerNorm.weight"] = lm_head["layernorm.weight"] output_state_dict["cls.predictions.transform.LayerNorm.bias"] = lm_head["layernorm.bias"] # For the decoder, we replicate the weights. output_state_dict["cls.predictions.decoder.weight"] = word_embeddings output_state_dict["cls.predictions.bias"] = lm_head["bias"] # The classifier from Megatron (for MLNI). binary_head = model["binary_head"] # Store the classifier. output_state_dict["cls.seq_relationship.weight"] = binary_head["weight"] output_state_dict["cls.seq_relationship.bias"] = binary_head["bias"] # It should be done! return output_state_dict #################################################################################################### def main(): # Create the argument parser. 
parser = argparse.ArgumentParser() parser.add_argument("--print-checkpoint-structure", action="store_true") parser.add_argument("path_to_checkpoint", type=str, help="Path to the ZIP file containing the checkpoint") parser.add_argument( "--config_file", default="", type=str, help="An optional config json file describing the pre-trained model.", ) args = parser.parse_args() # Extract the basename. basename = os.path.dirname(args.path_to_checkpoint) # Load the model. # the .zip is very optional, let's keep it for backward compatibility print(f'Extracting PyTorch state dictionary from "{args.path_to_checkpoint}"') if args.path_to_checkpoint.endswith(".zip"): with zipfile.ZipFile(args.path_to_checkpoint, "r") as checkpoint: with checkpoint.open("release/mp_rank_00/model_optim_rng.pt") as pytorch_dict: input_state_dict = torch.load(pytorch_dict, map_location="cpu") else: input_state_dict = torch.load(args.path_to_checkpoint, map_location="cpu") if args.config_file == "": # Default config of megatron-bert 345m config = MegatronBertConfig() # different megatron-bert-*-345m models have different vocab sizes, so override the default # config (which is for megatron-bert-cased-345m) with the actual vocab dimension config.vocab_size = input_state_dict["model"]["lm_head"]["bias"].numel() else: config = MegatronBertConfig.from_json_file(args.config_file) # Convert. print("Converting") output_state_dict = convert_megatron_checkpoint(args, input_state_dict, config) # Print the structure of converted state dict. if args.print_checkpoint_structure: recursive_print(None, output_state_dict) # Store the config to file. print("Saving config") config.save_pretrained(basename) # Store the state_dict to file. output_checkpoint_file = os.path.join(basename, "pytorch_model.bin") print(f'Saving checkpoint to "{output_checkpoint_file}"') torch.save(output_state_dict, output_checkpoint_file) #################################################################################################### if __name__ == "__main__": main() ####################################################################################################
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors with `//` is deprecated and has to be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors with `//` is deprecated and has to be done via `torch.div` instead.
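For reference, a minimal sketch of the pattern this PR targets (the tensor and variable names below are illustrative, not taken from the diff): tensor floor division written with `//` is replaced by `torch.div(..., rounding_mode="floor")`, which keeps the same result without the deprecation warning.

```python
# Hedged illustration only; not the PR's actual code.
import torch

input_ids = torch.arange(12)  # hypothetical integer tensor, just for demonstration

# deprecated pattern that triggers the __floordiv__ warning on older torch versions
block_index_old = input_ids // 4

# forward-compatible replacement used throughout the PR
block_index_new = torch.div(input_ids, 4, rounding_mode="floor")

# both spellings produce identical values and dtypes
assert torch.equal(block_index_old, block_index_new)
```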
./tests/generation/test_utils.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest import numpy as np from transformers import is_torch_available, pipeline from transformers.testing_utils import require_torch, slow, torch_device from ..test_modeling_common import floats_tensor, ids_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_torch_available(): import torch from transformers import ( AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoModelForSpeechSeq2Seq, AutoModelForVision2Seq, AutoTokenizer, BartForConditionalGeneration, BartTokenizer, GPT2LMHeadModel, GPT2Tokenizer, ImageGPTForCausalImageModeling, SpeechEncoderDecoderModel, top_k_top_p_filtering, ) from transformers.generation import ( BeamSampleDecoderOnlyOutput, BeamSampleEncoderDecoderOutput, BeamSearchDecoderOnlyOutput, BeamSearchEncoderDecoderOutput, BeamSearchScorer, ConstrainedBeamSearchScorer, DisjunctiveConstraint, ForcedBOSTokenLogitsProcessor, ForcedEOSTokenLogitsProcessor, GreedySearchDecoderOnlyOutput, GreedySearchEncoderDecoderOutput, HammingDiversityLogitsProcessor, InfNanRemoveLogitsProcessor, LogitsProcessorList, MaxLengthCriteria, MinLengthLogitsProcessor, NoBadWordsLogitsProcessor, NoRepeatNGramLogitsProcessor, PhrasalConstraint, RepetitionPenaltyLogitsProcessor, SampleDecoderOnlyOutput, SampleEncoderDecoderOutput, StoppingCriteria, StoppingCriteriaList, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, ) class GenerationTesterMixin: model_tester = None all_generative_model_classes = () input_name = "input_ids" def _get_input_ids_and_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict[self.input_name] # cut to half length & take max batch_size 3 max_batch_size = 2 sequence_length = input_ids.shape[-1] // 2 input_ids = input_ids[:max_batch_size, :sequence_length] # generate max 3 tokens max_length = input_ids.shape[-1] + 3 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` if isinstance(config.eos_token_id, int): config.eos_token_id = [config.eos_token_id] config.pad_token_id = config.eos_token_id[0] # TransfoXL has no attention mask if "transfoxl" in config.__class__.__name__.lower(): attention_mask = None else: attention_mask = torch.ones_like(input_ids, dtype=torch.long)[:max_batch_size, :sequence_length] return config, input_ids, attention_mask, max_length @staticmethod def _get_logits_processor_and_kwargs( input_length, eos_token_id, forced_bos_token_id=None, forced_eos_token_id=None, max_length=None, diversity_penalty=None, ): process_kwargs = { "min_length": input_length + 1 if max_length is None else max_length - 1, "bad_words_ids": [[1, 0]], "no_repeat_ngram_size": 2, "repetition_penalty": 1.2, } logits_processor = LogitsProcessorList( ( [ HammingDiversityLogitsProcessor(diversity_penalty, num_beams=2, num_beam_groups=2), ] if diversity_penalty is not None else [] ) + ( 
[ MinLengthLogitsProcessor(process_kwargs["min_length"], eos_token_id), ] if eos_token_id is not None else [] ) + ( [ ForcedBOSTokenLogitsProcessor(forced_bos_token_id), ] if forced_bos_token_id is not None else [] ) + ( [ForcedEOSTokenLogitsProcessor(max_length, forced_eos_token_id)] if forced_eos_token_id is not None else [] ) + [ NoBadWordsLogitsProcessor(process_kwargs["bad_words_ids"], eos_token_id), NoRepeatNGramLogitsProcessor(process_kwargs["no_repeat_ngram_size"]), RepetitionPenaltyLogitsProcessor(process_kwargs["repetition_penalty"]), ] ) return process_kwargs, logits_processor @staticmethod def _get_warper_and_kwargs(num_beams): warp_kwargs = {"top_k": 10, "top_p": 0.7, "temperature": 0.7} logits_warper = LogitsProcessorList( [ TemperatureLogitsWarper(warp_kwargs["temperature"]), TopKLogitsWarper(top_k=warp_kwargs["top_k"], min_tokens_to_keep=(2 if num_beams > 1 else 1)), TopPLogitsWarper(top_p=warp_kwargs["top_p"], min_tokens_to_keep=(2 if num_beams > 1 else 1)), ] ) return warp_kwargs, logits_warper @staticmethod def _get_beam_scorer_and_kwargs(batch_size, max_length, num_return_sequences=1): beam_kwargs = { "early_stopping": False, "length_penalty": 2.0, "num_beams": 2, "num_return_sequences": num_return_sequences, } beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=beam_kwargs["num_beams"], device=torch_device, length_penalty=beam_kwargs["length_penalty"], do_early_stopping=beam_kwargs["early_stopping"], num_beam_hyps_to_keep=num_return_sequences, ) return beam_kwargs, beam_scorer @staticmethod def _get_diverse_beam_scorer_and_kwargs(batch_size, max_length, num_return_sequences=1): beam_kwargs = { "early_stopping": False, "length_penalty": 2.0, "num_beams": 2, "num_return_sequences": num_return_sequences, "num_beam_groups": 2, # one beam per group "diversity_penalty": 2.0, } beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=beam_kwargs["num_beams"], device=torch_device, length_penalty=beam_kwargs["length_penalty"], do_early_stopping=beam_kwargs["early_stopping"], num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=beam_kwargs["num_beam_groups"], ) return beam_kwargs, beam_scorer @staticmethod def _get_constrained_beam_scorer_and_kwargs(batch_size, max_length, constraints, num_return_sequences=1): beam_kwargs = { "early_stopping": False, "length_penalty": 2.0, "num_beams": num_return_sequences * 4, "num_return_sequences": num_return_sequences, } beam_scorer = ConstrainedBeamSearchScorer( batch_size=batch_size, constraints=constraints, num_beams=beam_kwargs["num_beams"], device=torch_device, length_penalty=beam_kwargs["length_penalty"], do_early_stopping=beam_kwargs["early_stopping"], num_beam_hyps_to_keep=num_return_sequences, ) return beam_kwargs, beam_scorer @staticmethod def _get_encoder_outputs( model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1 ): encoder = model.get_encoder() encoder_outputs = encoder( input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave( num_interleave, dim=0 ) input_ids = torch.zeros_like(input_ids[:, :1]) + model._get_decoder_start_token_id() attention_mask = None return encoder_outputs, input_ids, attention_mask def _greedy_generate( self, model, input_ids, attention_mask, max_length, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): if 
model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], eos_token_id=model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) kwargs = {} model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, num_beams=1, max_length=max_length, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **logits_process_kwargs, **model_kwargs, ) if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_greedy = model.greedy_search( input_ids, max_length=max_length, logits_processor=logits_processor, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_greedy, output_generate def _sample_generate( self, model, input_ids, attention_mask, max_length, num_return_sequences, logits_processor, logits_warper, logits_warper_kwargs, process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): torch.manual_seed(0) model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=True, num_beams=1, max_length=max_length, num_return_sequences=num_return_sequences, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **logits_warper_kwargs, **process_kwargs, **model_kwargs, ) torch.manual_seed(0) kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=num_return_sequences, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(num_return_sequences, dim=0) # prevent flaky generation test failures logits_processor.append(InfNanRemoveLogitsProcessor()) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_sample = model.sample( input_ids.repeat_interleave(num_return_sequences, dim=0), max_length=max_length, logits_processor=logits_processor, logits_warper=logits_warper, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_sample, output_generate def _beam_search_generate( self, model, input_ids, attention_mask, max_length, beam_scorer, beam_kwargs, logits_processor, logits_process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): model_kwargs = 
{"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **beam_kwargs, **logits_process_kwargs, **model_kwargs, ) # beam_search does not automatically interleave `batch_size` dim for `num_beams` kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=beam_scorer.num_beams, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_beam_search = model.beam_search( input_ids.repeat_interleave(beam_scorer.num_beams, dim=0), beam_scorer, max_length=max_length, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_beam_search def _beam_sample_generate( self, model, input_ids, attention_mask, max_length, num_return_sequences, beam_scorer, beam_kwargs, logits_warper, logits_warper_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): torch.manual_seed(0) model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=True, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **beam_kwargs, **logits_warper_kwargs, **model_kwargs, ) # beam_search does not automatically interleave `batch_size` dim for `num_beams * num_return_sequences` kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=beam_scorer.num_beams * num_return_sequences, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams * num_return_sequences, dim=0) # prevent flaky generation test failures logits_processor = LogitsProcessorList() logits_processor.append(InfNanRemoveLogitsProcessor()) torch.manual_seed(0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_beam_sample = model.beam_sample( input_ids.repeat_interleave(beam_scorer.num_beams * num_return_sequences, dim=0), beam_scorer, max_length=max_length, logits_warper=logits_warper, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_beam_sample def _group_beam_search_generate( self, model, input_ids, attention_mask, max_length, beam_scorer, beam_kwargs, 
logits_processor, logits_process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **beam_kwargs, **logits_process_kwargs, **model_kwargs, ) # group_beam_search does not automatically interleave `batch_size` dim for `num_beams` kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=beam_scorer.num_beams, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_group_beam_search = model.group_beam_search( input_ids.repeat_interleave(beam_scorer.num_beams, dim=0), beam_scorer, max_length=max_length, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_group_beam_search def _constrained_beam_search_generate( self, model, input_ids, attention_mask, max_length, constrained_beam_scorer, constraints, beam_kwargs, logits_processor, logits_process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, constraints=constraints, **beam_kwargs, **logits_process_kwargs, **model_kwargs, ) # group_beam_search does not automatically interleave `batch_size` dim for `num_beams` kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=constrained_beam_scorer.num_beams, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(constrained_beam_scorer.num_beams, dim=0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_group_beam_search = model.constrained_beam_search( input_ids.repeat_interleave(constrained_beam_scorer.num_beams, dim=0), constrained_beam_scorer, max_length=max_length, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_group_beam_search def _contrastive_generate( self, model, input_ids, attention_mask, max_length, output_scores=False, 
output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): contrastive_search_kwargs = { "penalty_alpha": 0.6, "top_k": 5, } if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], eos_token_id=model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) kwargs = {} model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, num_beams=1, max_length=max_length, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **logits_process_kwargs, **model_kwargs, **contrastive_search_kwargs, ) if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)]) output_contrastive = model.contrastive_search( input_ids, stopping_criteria=stopping_criteria, logits_processor=logits_processor, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, **contrastive_search_kwargs, ) return output_contrastive, output_generate def test_greedy_generate(self): # check `generate()` and `greedy_search()` are equal for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # test old generation output for backwards compatibility model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length ) self.assertListEqual(output_greedy.tolist(), output_generate.tolist()) def test_greedy_generate_dict_outputs(self): for model_class in self.all_generative_model_classes: # disable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = False model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_greedy, GreedySearchEncoderDecoderOutput) self.assertIsInstance(output_generate, GreedySearchEncoderDecoderOutput) else: self.assertIsInstance(output_greedy, GreedySearchDecoderOnlyOutput) self.assertIsInstance(output_generate, GreedySearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_greedy.sequences.tolist()) for output in (output_greedy, output_generate): self._check_outputs(output, input_ids, model.config) def test_greedy_generate_dict_outputs_use_cache(self): for model_class in self.all_generative_model_classes: # enable cache config, input_ids, attention_mask, max_length = 
self._get_input_ids_and_config() if not hasattr(config, "use_cache"): # only relevant if model has "use_cache" return config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertListEqual(output_generate.sequences.tolist(), output_greedy.sequences.tolist()) for output in (output_greedy, output_generate): self._check_outputs(output, input_ids, model.config, use_cache=True) def test_sample_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=2) # check `generate()` and `sample()` are equal output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=1, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, ) self.assertListEqual(output_sample.tolist(), output_generate.tolist()) # check `generate()` and `sample()` yield equal results for `num_return_sequences` output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=3, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, ) self.assertListEqual(output_sample.tolist(), output_generate.tolist()) def test_sample_generate_dict_output(self): for model_class in self.all_generative_model_classes: # disable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = False model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=2, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_sample, SampleEncoderDecoderOutput) self.assertIsInstance(output_generate, SampleEncoderDecoderOutput) else: self.assertIsInstance(output_sample, SampleDecoderOnlyOutput) self.assertIsInstance(output_generate, SampleDecoderOnlyOutput) 
self.assertListEqual(output_generate.sequences.tolist(), output_sample.sequences.tolist()) for output in (output_sample, output_generate): self._check_outputs(output, input_ids, model.config, num_return_sequences=2) def test_beam_search_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length) # check `generate()` and `beam_search()` are equal output_generate, output_beam_search = self._beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_process_kwargs=logits_process_kwargs, logits_processor=logits_processor, ) self.assertListEqual(output_generate.tolist(), output_beam_search.tolist()) # check `generate()` and `beam_search()` are equal for `num_return_sequences` num_return_sequences = 2 if model.config.is_encoder_decoder: max_length = 4 beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs( input_ids.shape[0], max_length, num_return_sequences=num_return_sequences ) output_generate, output_beam_search = self._beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_process_kwargs=logits_process_kwargs, logits_processor=logits_processor, ) self.assertListEqual(output_generate.tolist(), output_beam_search.tolist()) def test_beam_search_generate_dict_output(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # disable cache config.use_cache = False # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length) output_generate, output_beam_search = self._beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_process_kwargs=logits_process_kwargs, logits_processor=logits_processor, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_beam_search, 
BeamSearchEncoderDecoderOutput) self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput) else: self.assertIsInstance(output_beam_search, BeamSearchDecoderOnlyOutput) self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_beam_search.sequences.tolist()) self.assertTrue( torch.allclose(output_generate["sequences_scores"], output_beam_search["sequences_scores"], atol=1e-3) ) self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],)) self.assertTrue((output_generate["sequences_scores"] < 0).all().item()) for output in (output_beam_search, output_generate): self._check_outputs(output, input_ids, model.config, num_return_sequences=beam_scorer.num_beams) def test_beam_search_generate_dict_outputs_use_cache(self): for model_class in self.all_generative_model_classes: # enable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None if not hasattr(config, "use_cache"): # only relevant if model has "use_cache" return model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length) config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() output_beam, output_generate = self._beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_process_kwargs=logits_process_kwargs, logits_processor=logits_processor, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertListEqual(output_generate.sequences.tolist(), output_beam.sequences.tolist()) for output in (output_beam, output_generate): self._check_outputs( output, input_ids, model.config, use_cache=True, num_return_sequences=beam_scorer.num_beams ) def test_beam_sample_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) model = model_class(config).to(torch_device).eval() # check `generate()` and `beam_search()` are equal # change `num_return_sequences = 2` but not for `beam_scorer` num_return_sequences = 2 if model.config.is_encoder_decoder: max_length = 4 beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs( input_ids.shape[0] * num_return_sequences, max_length ) beam_kwargs["num_return_sequences"] = num_return_sequences output_generate, output_beam_sample = 
self._beam_sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=num_return_sequences, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, ) self.assertListEqual(output_generate.tolist(), output_beam_sample.tolist()) def test_beam_sample_generate_dict_output(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # disable cache config.use_cache = False # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) num_return_sequences = 2 if model.config.is_encoder_decoder: max_length = 4 beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs( input_ids.shape[0] * num_return_sequences, max_length ) beam_kwargs["num_return_sequences"] = num_return_sequences output_beam_sample, output_generate = self._beam_sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=num_return_sequences, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_beam_sample, BeamSampleEncoderDecoderOutput) self.assertIsInstance(output_generate, BeamSampleEncoderDecoderOutput) else: self.assertIsInstance(output_beam_sample, BeamSampleDecoderOnlyOutput) self.assertIsInstance(output_generate, BeamSampleDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_beam_sample.sequences.tolist()) self.assertTrue( torch.allclose(output_generate["sequences_scores"], output_beam_sample["sequences_scores"], atol=1e-3) ) self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],)) self.assertTrue((output_generate["sequences_scores"] < 0).all().item()) for output in (output_beam_sample, output_generate): self._check_outputs( output, input_ids, model.config, num_return_sequences=num_return_sequences * beam_scorer.num_beams ) def test_generate_without_input_ids(self): config, _, _, max_length = self._get_input_ids_and_config() # if no bos token id => cannot generate from None if config.bos_token_id is None: return for model_class in self.all_generative_model_classes: model = model_class(config).to(torch_device) model.eval() output_ids_generate = model.generate(do_sample=False, max_length=max_length, remove_invalid_values=True) self.assertIsNotNone(output_ids_generate) def test_group_beam_search_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = 
model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, diversity_penalty=2.0, ) # check `generate()` and `group_beam_search()` are equal beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs(input_ids.shape[0], max_length) output_generate, output_group_beam_search = self._group_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, ) self.assertListEqual(output_generate.tolist(), output_group_beam_search.tolist()) # check `generate()` and `group_beam_search()` are equal for `num_return_sequences` num_return_sequences = 2 if model.config.is_encoder_decoder: max_length = 4 beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs( input_ids.shape[0], max_length, num_return_sequences=num_return_sequences ) output_generate, output_group_beam_search = self._group_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, ) self.assertListEqual(output_generate.tolist(), output_group_beam_search.tolist()) def test_group_beam_search_generate_dict_output(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = False # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, diversity_penalty=2.0, ) num_return_sequences = 1 beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs( input_ids.shape[0], max_length, num_return_sequences=num_return_sequences ) output_generate, output_group_beam_search = self._group_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_group_beam_search, BeamSearchEncoderDecoderOutput) self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput) else: self.assertIsInstance(output_group_beam_search, BeamSearchDecoderOnlyOutput) self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_group_beam_search.sequences.tolist()) self.assertTrue( torch.allclose( output_generate["sequences_scores"], output_group_beam_search["sequences_scores"], atol=1e-3 ) ) 
self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],)) self.assertTrue((output_generate["sequences_scores"] < 0).all().item()) for output in (output_group_beam_search, output_generate): self._check_outputs( output, input_ids, model.config, num_return_sequences=num_return_sequences * beam_scorer.num_beams ) def test_constrained_beam_search_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() max_length = 20 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) # check `generate()` and `constrained_beam_search()` are equal # Sample constraints if not input_ids.dtype == torch.float32: min_id = torch.min(input_ids) + 3 max_id = torch.max(input_ids) else: # otherwise this throws an error for Speech2TextModel since its inputs are floating points min_id = 3 max_id = 100 force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0] constraints = [ PhrasalConstraint(force_tokens), ] beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs( input_ids.shape[0], max_length, constraints, num_return_sequences=1 ) output_generate, output_beam_search = self._constrained_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, constrained_beam_scorer=beam_scorer, constraints=constraints, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, ) self.assertListEqual(output_generate.tolist(), output_beam_search.tolist()) for generation_output in output_generate: self._check_sequence_inside_sequence(force_tokens, generation_output) # check `generate()` and `constrained_beam_search()` are equal for `num_return_sequences` # Sample constraints force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0] constraints = [ PhrasalConstraint(force_tokens), ] num_return_sequences = 2 max_length = 20 beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs( input_ids.shape[0], max_length, constraints, num_return_sequences=num_return_sequences ) output_generate, output_beam_search = self._constrained_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, constrained_beam_scorer=beam_scorer, constraints=constraints, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, ) self.assertListEqual(output_generate.tolist(), output_beam_search.tolist()) for generation_output in output_generate: self._check_sequence_inside_sequence(force_tokens, generation_output) def test_constrained_beam_search_generate_dict_output(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # disable cache config.use_cache = False # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # 
failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 20 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) # Sample constraints min_id = 3 max_id = model.config.vocab_size force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0] constraints = [ PhrasalConstraint(force_tokens), ] beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs( input_ids.shape[0], max_length, constraints, num_return_sequences=1 ) output_generate, output_beam_search = self._constrained_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, constrained_beam_scorer=beam_scorer, constraints=constraints, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_beam_search, BeamSearchEncoderDecoderOutput) self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput) else: self.assertIsInstance(output_beam_search, BeamSearchDecoderOnlyOutput) self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_beam_search.sequences.tolist()) self.assertTrue( torch.allclose(output_generate["sequences_scores"], output_beam_search["sequences_scores"], atol=1e-3) ) self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],)) self.assertTrue((output_generate["sequences_scores"] < 0).all().item()) for output in (output_beam_search, output_generate): self._check_outputs(output, input_ids, model.config, num_return_sequences=beam_scorer.num_beams) def test_contrastive_generate(self): # check `generate()` and `contrastive_search()` are equal for model_class in self.all_generative_model_classes: # won't fix: FSMT and Reformer have a different cache variable type (and format). if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]): return config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # NOTE: contrastive search only works with cache on at the moment. if not hasattr(config, "use_cache"): return config.use_cache = True config.is_decoder = True # test old generation output for backwards compatibility model = model_class(config).to(torch_device).eval() output_contrastive, output_generate = self._contrastive_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length ) self.assertListEqual(output_contrastive.tolist(), output_generate.tolist()) def test_contrastive_generate_dict_outputs_use_cache(self): for model_class in self.all_generative_model_classes: # won't fix: FSMT and Reformer have a different cache variable type (and format). if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]): return # enable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # NOTE: contrastive search only works with cache on at the moment. 
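# Added clarifying comment: contrastive search compares each candidate token's hidden state against
# those of previously generated tokens (the degeneration penalty), so it needs `past_key_values`;
# models without a `use_cache` flag therefore cannot be exercised here and are skipped below.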
if not hasattr(config, "use_cache"): return config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() output_contrastive, output_generate = self._contrastive_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertListEqual(output_generate.sequences.tolist(), output_contrastive.sequences.tolist()) for output in (output_contrastive, output_generate): self._check_outputs(output, input_ids, model.config, use_cache=True) def test_generate_with_head_masking(self): """Test designed for encoder-decoder models to ensure the attention head masking is used.""" attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"] for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() model = model_class(config).to(torch_device) # We want to test only encoder-decoder models if not config.is_encoder_decoder: continue head_masking = { "head_mask": torch.zeros(config.encoder_layers, config.encoder_attention_heads, device=torch_device), "decoder_head_mask": torch.zeros( config.decoder_layers, config.decoder_attention_heads, device=torch_device ), "cross_attn_head_mask": torch.zeros( config.decoder_layers, config.decoder_attention_heads, device=torch_device ), } signature = inspect.signature(model.forward) # We want to test only models where encoder/decoder head masking is implemented if not set(head_masking.keys()) < {*signature.parameters.keys()}: continue for attn_name, (name, mask) in zip(attention_names, head_masking.items()): out = model.generate( input_ids, attention_mask=attention_mask, num_beams=1, output_attentions=True, return_dict_in_generate=True, remove_invalid_values=True, **{name: mask}, ) # We check the state of decoder_attentions and cross_attentions just from the last step attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0) def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1): batch_size, seq_length = input_ids.shape num_sequences_in_output = batch_size * num_return_sequences gen_len = ( output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length ) # scores self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config) # Attentions if config.is_encoder_decoder: # encoder self._check_encoder_attention_for_generate(output.encoder_attentions, batch_size, config, seq_length) # decoder self._check_attentions_for_generate( num_sequences_in_output, output.decoder_attentions, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) else: # if use_cache first input is equal to no use_cache, so skip here attentions = output.attentions if not use_cache else output.attentions[1:] min_length = seq_length if not use_cache else seq_length + 1 self._check_attentions_for_generate( num_sequences_in_output, attentions=attentions, min_length=min_length, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) # Hidden States if config.is_encoder_decoder: # encoder self._check_encoder_hidden_states_for_generate( output.encoder_hidden_states, batch_size, config, seq_length ) # decoder self._check_hidden_states_for_generate( num_sequences_in_output, 
output.decoder_hidden_states, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) else: # if use_cache first input is equal to no use_cache, so skip here hidden_states = output.hidden_states if not use_cache else output.hidden_states[1:] min_length = seq_length if not use_cache else seq_length + 1 self._check_hidden_states_for_generate( num_sequences_in_output, hidden_states, min_length=min_length, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) def _check_scores(self, batch_size, scores, length, config): expected_shape = (batch_size, config.vocab_size) self.assertIsInstance(scores, tuple) self.assertEqual(len(scores), length) self.assertListEqual([iter_scores.shape for iter_scores in scores], [expected_shape] * len(scores)) def _check_attentions_for_generate( self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(attentions, tuple) self.assertListEqual( [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions) ) self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups) for idx, iter_attentions in enumerate(attentions): tgt_len = min_length + idx if not use_cache else 1 src_len = min_length + idx expected_shape = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions) ) def _check_encoder_attention_for_generate(self, attentions, batch_size, config, seq_length): encoder_expected_shape = (batch_size, config.num_attention_heads, seq_length, seq_length) self.assertIsInstance(attentions, tuple) self.assertListEqual( [layer_attentions.shape for layer_attentions in attentions], [encoder_expected_shape] * len(attentions), ) def _check_hidden_states_for_generate( self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(hidden_states, tuple) self.assertListEqual( [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states], [True] * len(hidden_states), ) self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups) for idx, iter_hidden_states in enumerate(hidden_states): seq_len = min_length + idx if not use_cache else 1 expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(iter_hidden_states), ) def _check_encoder_hidden_states_for_generate(self, hidden_states, batch_size, config, seq_length): encoder_expected_shape = (batch_size, seq_length, config.hidden_size) self.assertIsInstance(hidden_states, tuple) self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in hidden_states], [encoder_expected_shape] * len(hidden_states), ) def _check_sequence_inside_sequence(self, tensor_1, tensor_2): # check if tensor_1 inside tensor_2 or tensor_2 inside tensor_1. # set to same device. we don't care what device. 
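# Added example comment: the helper below treats containment as a contiguous-subsequence match,
# e.g. [2, 3] counts as being inside [1, 2, 3, 4], while [3, 2] does not.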
if not isinstance(tensor_1, list): tensor_1 = tensor_1.cpu().tolist() if not isinstance(tensor_2, list): tensor_2 = tensor_2.cpu().tolist() in_order = len(tensor_1) <= len(tensor_2) longer = tensor_2 if in_order else tensor_1 shorter = tensor_1 if in_order else tensor_2 flag = False chunk_size = len(shorter) for chunk_idx in range(len(longer) - chunk_size + 1): subseq = longer[chunk_idx : chunk_idx + chunk_size] if subseq == shorter: flag = True break self.assertTrue(flag) @require_torch class UtilsFunctionsTest(unittest.TestCase): # tests whether the top_k_top_p function behaves as expected def test_top_k_top_p_filtering(self): logits = torch.tensor( [ [ 8.2220991, # 3rd highest value; idx. 0 -0.5620044, 5.23229752, 4.0386393, -6.8798378, -0.54785802, -3.2012153, 2.92777176, 1.88171953, 7.35341276, 8.43207833, # 2nd highest value; idx. 10 -9.85711836, -5.96209236, -1.13039161, -7.1115294, -0.8369633, -5.3186408, 7.06427407, 0.81369344, -0.82023817, -5.9179796, 0.58813443, -6.99778438, 4.71551189, -0.18771637, 7.44020759, # 4th highest value; idx. 25 9.38450987, # 1st highest value; idx. 26 2.12662941, -9.32562038, 2.35652522, ], # cummulative prob of 4 highest values <= 0.6 [ 0.58425518, 4.53139238, -5.57510464, -6.28030699, -7.19529503, -4.02122551, 1.39337037, -6.06707057, 1.59480517, -9.643119, 0.03907799, 0.67231762, -8.88206726, 6.27115922, # 4th highest value; idx. 13 2.28520723, 4.82767506, 4.30421368, 8.8275313, # 2nd highest value; idx. 17 5.44029958, -4.4735794, 7.38579536, # 3rd highest value; idx. 20 -2.91051663, 2.61946077, -2.5674762, -9.48959302, -4.02922645, -1.35416918, 9.67702323, # 1st highest value; idx. 27 -5.89478553, 1.85370467, ], # cummulative prob of 4 highest values <= 0.6 ], dtype=torch.float, device=torch_device, ) non_inf_expected_idx = torch.tensor( [[0, 0], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 20], [1, 27]], dtype=torch.long, device=torch_device, ) # expected non filtered idx as noted above non_inf_expected_output = torch.tensor( [ 8.2221, 8.4321, 7.4402, 9.3845, 6.2712, 8.8275, 7.3858, 9.6770, ], # expected non filtered values as noted above dtype=torch.float, device=torch_device, ) output = top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4) non_inf_output = output[output != -float("inf")].to(device=torch_device) non_inf_idx = (output != -float("inf")).nonzero().to(device=torch_device) self.assertTrue(torch.allclose(non_inf_expected_output, non_inf_output, atol=1e-12)) self.assertTrue(torch.all(torch.eq(non_inf_expected_idx, non_inf_idx))) # tests whether the function uses filter_value instead of default -inf def test_top_k_top_p_filtering_with_filter_value(self): logits = torch.tensor( [ [ 1, 1, 1, 0.99, # get filtered by top-p filtering 0.98, # get filtered by top-k filtering ] ], dtype=torch.float, device=torch_device, ) expected_output = torch.tensor( [[1, 1, 1, 0, 0]], dtype=torch.float, device=torch_device, ) output = top_k_top_p_filtering(logits, top_k=4, top_p=0.5, filter_value=0.0) self.assertTrue(torch.allclose(expected_output, output, atol=1e-12)) @require_torch class GenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_torch_available(): framework_dependent_parameters = { "AutoModelForCausalLM": AutoModelForCausalLM, "AutoModelForSpeechSeq2Seq": AutoModelForSpeechSeq2Seq, "AutoModelForSeq2SeqLM": AutoModelForSeq2SeqLM, "AutoModelForVision2Seq": AutoModelForVision2Seq, 
"LogitsProcessorList": LogitsProcessorList, "MinLengthLogitsProcessor": MinLengthLogitsProcessor, "create_tensor_fn": torch.tensor, "floats_tensor": floats_tensor, "return_tensors": "pt", } @slow def test_diverse_beam_search(self): # PT-only test: TF doesn't have a diverse beam search implementation article = """Justin Timberlake and Jessica Biel, welcome to parenthood. The celebrity couple announced the arrival of their son, Silas Randall Timberlake, in statements to People. "Silas was the middle name of Timberlake's maternal grandfather Bill Bomar, who died in 2012, while Randall is the musician's own middle name, as well as his father's first," People reports. The couple announced the pregnancy in January, with an Instagram post. It is the first baby for both.""" bart_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") bart_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn").to(torch_device) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) outputs = bart_model.generate( input_ids, num_beams=4, num_return_sequences=2, num_beam_groups=4, diversity_penalty=2.0, remove_invalid_values=True, ) generated_text = bart_tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The couple announced the birth of their son, Silas Randall Timberlake, in a statement. Silas was the" " middle name of Timberlake's maternal grandfather Bill Bomar. Randall is the musician's own middle" " name, as well as his father's first. It is the first baby for both of them.", "Justin Timberlake and Jessica Biel have a son. The baby is named Silas Randall Timberlake. It is the" " first child for both. The couple announced the pregnancy in January. The name Silas is the middle" " name of Timberlake's maternal grandfather. 
It's also his own middle name.", ], ) def test_max_length_backward_compat_greedy(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) max_length = 20 input_ids = input_ids.expand(2, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids = bart_model._prepare_decoder_input_ids_for_generation( input_ids.shape[0], decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) with self.assertWarns(UserWarning): bart_model.greedy_search( input_ids, max_length=max_length, pad_token_id=bart_model.config.pad_token_id, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) def test_max_length_backward_compat_sample(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) max_length = 20 input_ids = input_ids.expand(2, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids = bart_model._prepare_decoder_input_ids_for_generation( input_ids.shape[0], decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) with torch.no_grad(): with self.assertWarns(UserWarning): bart_model.sample( input_ids, max_length=max_length, pad_token_id=bart_model.config.pad_token_id, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) def test_max_length_backward_compat_beam_search(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) batch_size = 1 max_length = 20 num_beams = 2 input_ids = input_ids.expand(2, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids = bart_model._prepare_decoder_input_ids_for_generation( input_ids.shape[0], decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, ) with self.assertWarns(UserWarning): _ = bart_model.beam_search( input_ids, num_beams=num_beams, max_length=max_length, beam_scorer=beam_scorer, **model_kwargs ) def test_max_length_backward_compat_group_beam_search(self): # PT-only test: TF doesn't have StoppingCriteria & group beam search article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( 
torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) batch_size = 1 max_length = 20 num_beams = 6 num_beam_groups = 3 num_return_sequences = num_beams * batch_size input_ids = input_ids.expand(6, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids = bart_model._prepare_decoder_input_ids_for_generation( input_ids.shape[0], decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) diverse_beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=num_beam_groups, ) with self.assertWarns(UserWarning): bart_model.group_beam_search( input_ids, diverse_beam_scorer, num_beams=num_beams, max_length=max_length, **model_kwargs ) def test_max_length_warning_if_different(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) batch_size = 1 max_length = 20 num_beams = 6 num_beam_groups = 3 num_return_sequences = num_beams * batch_size stopping_criteria_max_length = 18 stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=stopping_criteria_max_length)]) # Greedy input_ids = input_ids.expand(6, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids = bart_model._prepare_decoder_input_ids_for_generation( input_ids.shape[0], decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) with self.assertWarns(UserWarning): bart_model.greedy_search( input_ids, max_length=max_length, pad_token_id=bart_model.config.pad_token_id, stopping_criteria=stopping_criteria, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) # Sample with self.assertWarns(UserWarning): with torch.no_grad(): bart_model.sample( input_ids, max_length=max_length, stopping_criteria=stopping_criteria, pad_token_id=bart_model.config.pad_token_id, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) # Beam beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, ) with self.assertWarns(UserWarning): with torch.no_grad(): bart_model.beam_search( input_ids, num_beams=num_beams, stopping_criteria=stopping_criteria, max_length=max_length, beam_scorer=beam_scorer, **model_kwargs, ) # Grouped beam search diverse_beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=num_beam_groups, ) with self.assertWarns(UserWarning): bart_model.group_beam_search( input_ids, diverse_beam_scorer, stopping_criteria=stopping_criteria, num_beams=num_beams, max_length=max_length, **model_kwargs, ) def test_custom_stopping_criteria_overload_error(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random") bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device) input_ids = bart_tokenizer(article, 
return_tensors="pt").input_ids.to(torch_device) stopping_criteria = StoppingCriteriaList() stopping_criteria.append(MaxLengthCriteria(max_length=42)) with self.assertRaises(ValueError): bart_model.generate(input_ids, stopping_criteria=stopping_criteria) with self.assertRaises(ValueError): bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=32) def test_custom_stopping_criteria(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random") bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) class DummyCriteria(StoppingCriteria): def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: return input_ids.shape[-1] >= 20 stopping_criteria = StoppingCriteriaList() stopping_criteria.append(DummyCriteria()) self.assertEqual( list(bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=22).shape), [1, 20], ) self.assertEqual( list(bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=18).shape), [1, 18], ) def test_stop_sequence_stopping_criteria(self): # PT-only test: TF doesn't have StoppingCriteria prompt = """Hello I believe in""" generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-bart") output = generator(prompt) self.assertEqual( output, [ { "generated_text": ( "Hello I believe in in in number number number number number number number number number" ) } ], ) output = generator(prompt, stop_sequence=" number") self.assertEqual(output, [{"generated_text": "Hello I believe in in in number"}]) def test_generate_non_nlp_input_ids_as_kwarg(self): # PT-only test: AFAIK there's no non-NLP model architecture in TF that supports `input_ids` as its only input model = ImageGPTForCausalImageModeling.from_pretrained( "hf-internal-testing/tiny-random-imagegpt", max_length=10 ).to(torch_device) input_ids = ids_tensor((3, 5), vocab_size=10) output_sequences_kwargs = model.generate(input_ids=input_ids).cpu() output_sequences = model.generate(input_ids).cpu() self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist()) self.assertEqual(output_sequences.shape, (3, 10)) def test_generate_input_values_as_encoder_kwarg(self): # PT-only test: AFAIK there's no generate-capable architecture in TF that supports `input_values` as its input input_values = floats_tensor((2, 250)) model = SpeechEncoderDecoderModel.from_pretrained("hf-internal-testing/tiny-random-speech-encoder-decoder") model = model.to(torch_device) output_sequences_kwargs = model.generate(input_values=input_values, max_length=5).cpu() output_sequences = model.generate(input_values, max_length=5).cpu() self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist()) self.assertEqual(output_sequences.shape, (2, 5)) def test_transition_scores_group_beam_search_encoder_decoder(self): # PT-only test: TF doesn't have group beam search articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained( "hf-internal-testing/tiny-random-bart", max_length=10, num_beams=2, num_beam_groups=2, 
num_return_sequences=2, eos_token_id=None, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) model = model.to(torch_device) input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices) transition_scores_sum = transition_scores.sum(-1) self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3)) @slow def test_beam_search_example_integration(self): # PT-only test: TF doesn't have a BeamSearchScorer # exactly the example provided in the docstrings of beam search, which previously # failed after directly copying from it. Refer to PR #15555 tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # lets run beam search using 3 beams num_beams = 3 # define decoder start token ids input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) input_ids = input_ids * model.config.decoder_start_token_id # add encoder_outputs to model keyword arguments model_kwargs = { "encoder_outputs": model.get_encoder()( encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ) } # instantiate beam scorer beam_scorer = BeamSearchScorer( batch_size=1, num_beams=num_beams, device=model.device, ) # instantiate logits processors logits_processor = LogitsProcessorList( [ MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ] ) outputs = model.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(outputs, ["Wie alt bist du?"]) @slow def test_constrained_beam_search(self): # PT-only test: TF doesn't have constrained beam search model = GPT2LMHeadModel.from_pretrained("gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") force_tokens = tokenizer("scared", add_prefix_space=True, add_special_tokens=False).input_ids force_tokens_2 = tokenizer("big weapons", add_prefix_space=True, add_special_tokens=False).input_ids constraints = [ PhrasalConstraint(force_tokens), PhrasalConstraint(force_tokens_2), ] starting_text = ["The soldiers were not prepared and"] input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device) outputs = model.generate( input_ids, constraints=constraints, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, max_length=30, remove_invalid_values=True, ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The soldiers were not prepared and didn't know what to do. 
They had no idea how they would react if" " the enemy attacked them, big weapons scared" ], ) @slow def test_constrained_beam_search_mixed(self): # PT-only test: TF doesn't have constrained beam search model = GPT2LMHeadModel.from_pretrained("gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") force_phrase = tokenizer("scared", add_prefix_space=True, add_special_tokens=False).input_ids flexible_phrases = tokenizer( ["scream", "screams", "screaming", "screamed"], add_prefix_space=True, add_special_tokens=False ).input_ids constraints = [ PhrasalConstraint(force_phrase), DisjunctiveConstraint(flexible_phrases), ] starting_text = ["The soldiers", "The child"] input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device) outputs = model.generate( input_ids, constraints=constraints, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, # max_length=20, remove_invalid_values=True, ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The soldiers, who had been stationed at the base for more than a year before being evacuated" " screaming scared", "The child was taken to a local hospital where he died.\n 'I don't think screaming scared", ], ) @slow def test_constrained_beam_search_mixed_mixin(self): # PT-only test: TF doesn't have constrained beam search model = GPT2LMHeadModel.from_pretrained("gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") force_word = "scared" force_flexible = ["scream", "screams", "screaming", "screamed"] force_words_ids = [ tokenizer([force_word], add_prefix_space=True, add_special_tokens=False).input_ids, tokenizer(force_flexible, add_prefix_space=True, add_special_tokens=False).input_ids, ] starting_text = ["The soldiers", "The child"] input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device) outputs = model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The soldiers, who had been stationed at the base for more than a year before being evacuated" " screaming scared", "The child was taken to a local hospital where he died.\n 'I don't think screaming scared", ], ) @slow def test_constrained_beam_search_example_translation_mixin(self): # PT-only test: TF doesn't have constrained beam search tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" force_words = ["sind"] input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids force_words_ids = tokenizer(force_words, add_special_tokens=False).input_ids outputs = model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(outputs, ["Wie alt sind Sie?"]) @slow def test_constrained_beam_search_example_integration(self): # PT-only test: TF doesn't have constrained beam search tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" 
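# Added comment: the manual pipeline below mirrors what `generate(force_words_ids=...)` does
# internally -- encode the prompt once, expand the encoder outputs to `num_beams`, build a
# `ConstrainedBeamSearchScorer` from the forced token ids, and decode with `constrained_beam_search`.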
encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # lets run beam search using 5 beams num_beams = 5 # define decoder start token ids input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) input_ids = input_ids * model.config.decoder_start_token_id # add encoder_outputs to model keyword arguments model_kwargs = { "encoder_outputs": model.get_encoder()( encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ) } constraint_str = "sind" constraint_token_ids = tokenizer.encode(constraint_str)[:-1] # remove eos token constraints = [PhrasalConstraint(token_ids=constraint_token_ids)] # instantiate beam scorer beam_scorer = ConstrainedBeamSearchScorer( batch_size=1, num_beams=num_beams, device=model.device, constraints=constraints ) # instantiate logits processors logits_processor = LogitsProcessorList( [ MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ] ) outputs = model.constrained_beam_search( input_ids, beam_scorer, constraints=constraints, logits_processor=logits_processor, **model_kwargs ) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(outputs, ["Wie alt sind Sie?"]) def test_constrained_beam_search_mixin_type_checks(self): # PT-only test: TF doesn't have constrained beam search tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/t5-tiny-random") model = AutoModelForSeq2SeqLM.from_pretrained("patrickvonplaten/t5-tiny-random") encoder_input_str = "translate English to German: How old are you?" input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids with self.assertRaises(ValueError): force_words = ["sind"] force_words_ids = tokenizer(force_words, return_tensors="pt").input_ids model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) with self.assertRaises(ValueError): force_words = ["sind"] force_words_ids = [tokenizer(force_words, return_tensors="pt").input_ids] model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) with self.assertRaises(ValueError): model.generate(input_ids, force_words_ids=[]) with self.assertRaises(ValueError): model.generate(input_ids, force_words_ids=[[-1]]) with self.assertRaises(ValueError): model.generate(input_ids, force_words_ids=[[[-1]]]) def test_contrastive_search_batched(self): # PT-only test: TF doesn't have constrained beam search # Tests that contrastive search works with batched inputs (i.e. 
has the same output as for non-batched inputs) articles = ["Foo", "Bar Baz"] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(torch_device) model.config.eos_token_id = None input_ids_batched = tokenizer(articles, padding=True, return_tensors="pt").input_ids.to(torch_device) input_ids = tokenizer(articles[1], return_tensors="pt").input_ids.to(torch_device) output_sequences_batched = model.generate( input_ids=input_ids_batched, penalty_alpha=0.6, top_k=4, return_dict_in_generate=True, output_scores=True ) output_sequences = model.generate( input_ids=input_ids, penalty_alpha=0.6, top_k=4, return_dict_in_generate=True, output_scores=True ) batched_out = tokenizer.decode(output_sequences_batched.sequences[1], skip_special_tokens=True) out = tokenizer.decode(output_sequences.sequences[0], skip_special_tokens=True) self.assertEqual(batched_out, out) # output_sequences_batched.scores[0][1] -> 1st set of logits, 2nd sequence max_score_diff = (output_sequences_batched.scores[0][1] - output_sequences.scores[0][0]).abs().max() self.assertTrue(max_score_diff < 1e-5) def test_eos_token_id_int_and_list_top_k_top_sampling(self): # Has TF equivalent: this test relies on random sampling generation_kwargs = { "do_sample": True, "num_beams": 1, "top_p": 0.7, "top_k": 10, "temperature": 0.7, } expectation = 15 tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") text = """Hello, my dog is cute and""" tokens = tokenizer(text, return_tensors="pt").to(torch_device) model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) torch.manual_seed(0) eos_token_id = 846 generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) torch.manual_seed(0) eos_token_id = [846, 198] generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) def test_generate_from_inputs_embeds_decoder_only(self): # PT-only test: TF doesn't have a model with support to generate from input embeds (yet ;)) # Note: the model must support generation from input embeddings model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model.config.pad_token_id = tokenizer.eos_token_id text = "Hello world" tokenized_inputs = tokenizer([text, text], return_tensors="pt") input_ids = tokenized_inputs.input_ids.to(torch_device) # Traditional way of generating text outputs_from_ids = model.generate(input_ids) self.assertEqual(outputs_from_ids.shape, (2, 20)) # Same thing, but from input embeddings inputs_embeds = model.transformer.wte(input_ids) outputs_from_embeds = model.generate(input_ids, inputs_embeds=inputs_embeds) self.assertListEqual(outputs_from_ids.tolist(), outputs_from_embeds.tolist()) # But if we pass different inputs_embeds, we should get different outputs torch.manual_seed(0) random_embeds = torch.rand_like(inputs_embeds) outputs_from_rand_embeds = model.generate(input_ids, inputs_embeds=random_embeds) with self.assertRaises(AssertionError): self.assertListEqual(outputs_from_rand_embeds.tolist(), outputs_from_embeds.tolist()) # input_ids is not a required input -- if we don't pass it, the newly generated tokens will be the same 
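# Added comment (assumption about this version's behavior): when only `inputs_embeds` is passed,
# generation is seeded with a single BOS token, which is why the comparison below drops the first
# token of `outputs_from_embeds_wo_ids` and the prompt-length prefix of `outputs_from_embeds`.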
outputs_from_embeds_wo_ids = model.generate( inputs_embeds=inputs_embeds, max_new_tokens=20 - inputs_embeds.shape[1] ) self.assertListEqual( outputs_from_embeds[:, inputs_embeds.shape[1] :].tolist(), outputs_from_embeds_wo_ids[:, 1:].tolist(), ) def test_model_kwarg_encoder_signature_filtering(self): # Has TF equivalent: ample use of framework-specific code bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") article = """Hugging Face is a technology company based in New York and Paris.""" input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) output = bart_model.generate(input_ids).cpu().numpy() # Let's create a fake model that has a different signature. In particular, this fake model accepts "foo" as an # argument. Because "foo" is not in the encoder signature and doesn't start with "decoder_", it will be part of # the encoder kwargs prior to signature filtering, which would lead to an exception. But filtering kicks in and # saves the day. class FakeBart(BartForConditionalGeneration): def forward(self, input_ids, foo=None, **kwargs): return super().forward(input_ids, **kwargs) bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart").to(torch_device) fake_output = bart_model.generate(input_ids, foo="bar").cpu().numpy() self.assertTrue(np.array_equal(output, fake_output)) # Encoder signature filtering only kicks in if it doesn't accept wildcard kwargs. The following test will fail # because it doesn't do signature filtering. class FakeEncoder(bart_model.model.encoder.__class__): def forward(self, input_ids, **kwargs): return super().forward(input_ids, **kwargs) fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared).to(torch_device) bart_model.model.encoder = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) fake_output = bart_model.generate(input_ids).cpu().numpy() with self.assertRaises(TypeError): # FakeEncoder.forward() accepts **kwargs -> no filtering -> type error due to unexpected input "foo" bart_model.generate(input_ids, foo="bar")
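# --- Illustrative sketch (added; not part of either source file above or below) ---
# A minimal, self-contained example of the public `generate()` API these tests exercise,
# contrasting greedy decoding with contrastive search (`penalty_alpha` + `top_k`). The tiny
# checkpoint name is the same one the tests use; the generated text itself will vary.
from transformers import AutoModelForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer("Hello, my dog is cute and", return_tensors="pt")

# Greedy decoding: pick the argmax token at every step.
greedy_ids = model.generate(**inputs, do_sample=False, max_new_tokens=10)
# Contrastive search: re-rank the top-k candidates with a degeneration penalty.
contrastive_ids = model.generate(**inputs, penalty_alpha=0.6, top_k=4, max_new_tokens=10)

print(tokenizer.batch_decode(greedy_ids, skip_special_tokens=True))
print(tokenizer.batch_decode(contrastive_ids, skip_special_tokens=True))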
# coding=utf-8 # Copyright 2020 The HuggingFace Team Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a clone of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import unittest import numpy as np from transformers import is_torch_available, pipeline from transformers.testing_utils import require_torch, slow, torch_device from ..test_modeling_common import floats_tensor, ids_tensor from .test_framework_agnostic import GenerationIntegrationTestsMixin if is_torch_available(): import torch from transformers import ( AutoModelForCausalLM, AutoModelForSeq2SeqLM, AutoModelForSpeechSeq2Seq, AutoModelForVision2Seq, AutoTokenizer, BartForConditionalGeneration, BartTokenizer, GPT2LMHeadModel, GPT2Tokenizer, ImageGPTForCausalImageModeling, SpeechEncoderDecoderModel, top_k_top_p_filtering, ) from transformers.generation import ( BeamSampleDecoderOnlyOutput, BeamSampleEncoderDecoderOutput, BeamSearchDecoderOnlyOutput, BeamSearchEncoderDecoderOutput, BeamSearchScorer, ConstrainedBeamSearchScorer, DisjunctiveConstraint, ForcedBOSTokenLogitsProcessor, ForcedEOSTokenLogitsProcessor, GreedySearchDecoderOnlyOutput, GreedySearchEncoderDecoderOutput, HammingDiversityLogitsProcessor, InfNanRemoveLogitsProcessor, LogitsProcessorList, MaxLengthCriteria, MinLengthLogitsProcessor, NoBadWordsLogitsProcessor, NoRepeatNGramLogitsProcessor, PhrasalConstraint, RepetitionPenaltyLogitsProcessor, SampleDecoderOnlyOutput, SampleEncoderDecoderOutput, StoppingCriteria, StoppingCriteriaList, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, ) class GenerationTesterMixin: model_tester = None all_generative_model_classes = () input_name = "input_ids" def _get_input_ids_and_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict[self.input_name] # cut to half length & take max batch_size 3 max_batch_size = 2 sequence_length = input_ids.shape[-1] // 2 input_ids = input_ids[:max_batch_size, :sequence_length] # generate max 3 tokens max_length = input_ids.shape[-1] + 3 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` if isinstance(config.eos_token_id, int): config.eos_token_id = [config.eos_token_id] config.pad_token_id = config.eos_token_id[0] # TransfoXL has no attention mask if "transfoxl" in config.__class__.__name__.lower(): attention_mask = None else: attention_mask = torch.ones_like(input_ids, dtype=torch.long)[:max_batch_size, :sequence_length] return config, input_ids, attention_mask, max_length @staticmethod def _get_logits_processor_and_kwargs( input_length, eos_token_id, forced_bos_token_id=None, forced_eos_token_id=None, max_length=None, diversity_penalty=None, ): process_kwargs = { "min_length": input_length + 1 if max_length is None else max_length - 1, "bad_words_ids": [[1, 0]], "no_repeat_ngram_size": 2, "repetition_penalty": 1.2, } logits_processor = LogitsProcessorList( ( [ HammingDiversityLogitsProcessor(diversity_penalty, num_beams=2, num_beam_groups=2), ] if diversity_penalty is not None else [] ) + ( 
[ MinLengthLogitsProcessor(process_kwargs["min_length"], eos_token_id), ] if eos_token_id is not None else [] ) + ( [ ForcedBOSTokenLogitsProcessor(forced_bos_token_id), ] if forced_bos_token_id is not None else [] ) + ( [ForcedEOSTokenLogitsProcessor(max_length, forced_eos_token_id)] if forced_eos_token_id is not None else [] ) + [ NoBadWordsLogitsProcessor(process_kwargs["bad_words_ids"], eos_token_id), NoRepeatNGramLogitsProcessor(process_kwargs["no_repeat_ngram_size"]), RepetitionPenaltyLogitsProcessor(process_kwargs["repetition_penalty"]), ] ) return process_kwargs, logits_processor @staticmethod def _get_warper_and_kwargs(num_beams): warp_kwargs = {"top_k": 10, "top_p": 0.7, "temperature": 0.7} logits_warper = LogitsProcessorList( [ TemperatureLogitsWarper(warp_kwargs["temperature"]), TopKLogitsWarper(top_k=warp_kwargs["top_k"], min_tokens_to_keep=(2 if num_beams > 1 else 1)), TopPLogitsWarper(top_p=warp_kwargs["top_p"], min_tokens_to_keep=(2 if num_beams > 1 else 1)), ] ) return warp_kwargs, logits_warper @staticmethod def _get_beam_scorer_and_kwargs(batch_size, max_length, num_return_sequences=1): beam_kwargs = { "early_stopping": False, "length_penalty": 2.0, "num_beams": 2, "num_return_sequences": num_return_sequences, } beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=beam_kwargs["num_beams"], device=torch_device, length_penalty=beam_kwargs["length_penalty"], do_early_stopping=beam_kwargs["early_stopping"], num_beam_hyps_to_keep=num_return_sequences, ) return beam_kwargs, beam_scorer @staticmethod def _get_diverse_beam_scorer_and_kwargs(batch_size, max_length, num_return_sequences=1): beam_kwargs = { "early_stopping": False, "length_penalty": 2.0, "num_beams": 2, "num_return_sequences": num_return_sequences, "num_beam_groups": 2, # one beam per group "diversity_penalty": 2.0, } beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=beam_kwargs["num_beams"], device=torch_device, length_penalty=beam_kwargs["length_penalty"], do_early_stopping=beam_kwargs["early_stopping"], num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=beam_kwargs["num_beam_groups"], ) return beam_kwargs, beam_scorer @staticmethod def _get_constrained_beam_scorer_and_kwargs(batch_size, max_length, constraints, num_return_sequences=1): beam_kwargs = { "early_stopping": False, "length_penalty": 2.0, "num_beams": num_return_sequences * 4, "num_return_sequences": num_return_sequences, } beam_scorer = ConstrainedBeamSearchScorer( batch_size=batch_size, constraints=constraints, num_beams=beam_kwargs["num_beams"], device=torch_device, length_penalty=beam_kwargs["length_penalty"], do_early_stopping=beam_kwargs["early_stopping"], num_beam_hyps_to_keep=num_return_sequences, ) return beam_kwargs, beam_scorer @staticmethod def _get_encoder_outputs( model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1 ): encoder = model.get_encoder() encoder_outputs = encoder( input_ids, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave( num_interleave, dim=0 ) input_ids = torch.zeros_like(input_ids[:, :1]) + model._get_decoder_start_token_id() attention_mask = None return encoder_outputs, input_ids, attention_mask def _greedy_generate( self, model, input_ids, attention_mask, max_length, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): if 
model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], eos_token_id=model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) kwargs = {} model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, num_beams=1, max_length=max_length, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **logits_process_kwargs, **model_kwargs, ) if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_greedy = model.greedy_search( input_ids, max_length=max_length, logits_processor=logits_processor, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_greedy, output_generate def _sample_generate( self, model, input_ids, attention_mask, max_length, num_return_sequences, logits_processor, logits_warper, logits_warper_kwargs, process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): torch.manual_seed(0) model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=True, num_beams=1, max_length=max_length, num_return_sequences=num_return_sequences, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **logits_warper_kwargs, **process_kwargs, **model_kwargs, ) torch.manual_seed(0) kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=num_return_sequences, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(num_return_sequences, dim=0) # prevent flaky generation test failures logits_processor.append(InfNanRemoveLogitsProcessor()) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_sample = model.sample( input_ids.repeat_interleave(num_return_sequences, dim=0), max_length=max_length, logits_processor=logits_processor, logits_warper=logits_warper, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_sample, output_generate def _beam_search_generate( self, model, input_ids, attention_mask, max_length, beam_scorer, beam_kwargs, logits_processor, logits_process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): model_kwargs = 
{"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **beam_kwargs, **logits_process_kwargs, **model_kwargs, ) # beam_search does not automatically interleave `batch_size` dim for `num_beams` kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=beam_scorer.num_beams, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_beam_search = model.beam_search( input_ids.repeat_interleave(beam_scorer.num_beams, dim=0), beam_scorer, max_length=max_length, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_beam_search def _beam_sample_generate( self, model, input_ids, attention_mask, max_length, num_return_sequences, beam_scorer, beam_kwargs, logits_warper, logits_warper_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): torch.manual_seed(0) model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=True, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **beam_kwargs, **logits_warper_kwargs, **model_kwargs, ) # beam_search does not automatically interleave `batch_size` dim for `num_beams * num_return_sequences` kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=beam_scorer.num_beams * num_return_sequences, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams * num_return_sequences, dim=0) # prevent flaky generation test failures logits_processor = LogitsProcessorList() logits_processor.append(InfNanRemoveLogitsProcessor()) torch.manual_seed(0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_beam_sample = model.beam_sample( input_ids.repeat_interleave(beam_scorer.num_beams * num_return_sequences, dim=0), beam_scorer, max_length=max_length, logits_warper=logits_warper, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_beam_sample def _group_beam_search_generate( self, model, input_ids, attention_mask, max_length, beam_scorer, beam_kwargs, 
logits_processor, logits_process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **beam_kwargs, **logits_process_kwargs, **model_kwargs, ) # group_beam_search does not automatically interleave `batch_size` dim for `num_beams` kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=beam_scorer.num_beams, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(beam_scorer.num_beams, dim=0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_group_beam_search = model.group_beam_search( input_ids.repeat_interleave(beam_scorer.num_beams, dim=0), beam_scorer, max_length=max_length, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_group_beam_search def _constrained_beam_search_generate( self, model, input_ids, attention_mask, max_length, constrained_beam_scorer, constraints, beam_kwargs, logits_processor, logits_process_kwargs, output_scores=False, output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, max_length=max_length, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, constraints=constraints, **beam_kwargs, **logits_process_kwargs, **model_kwargs, ) # group_beam_search does not automatically interleave `batch_size` dim for `num_beams` kwargs = {} if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, num_interleave=constrained_beam_scorer.num_beams, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs elif attention_mask is not None: attention_mask = attention_mask.repeat_interleave(constrained_beam_scorer.num_beams, dim=0) with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_group_beam_search = model.constrained_beam_search( input_ids.repeat_interleave(constrained_beam_scorer.num_beams, dim=0), constrained_beam_scorer, max_length=max_length, logits_processor=logits_processor, output_scores=output_scores, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, ) return output_generate, output_group_beam_search def _contrastive_generate( self, model, input_ids, attention_mask, max_length, output_scores=False, 
output_attentions=False, output_hidden_states=False, return_dict_in_generate=False, ): contrastive_search_kwargs = { "penalty_alpha": 0.6, "top_k": 5, } if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], eos_token_id=model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) kwargs = {} model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} output_generate = model.generate( input_ids, do_sample=False, num_beams=1, max_length=max_length, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, remove_invalid_values=True, **logits_process_kwargs, **model_kwargs, **contrastive_search_kwargs, ) if model.config.is_encoder_decoder: encoder_outputs, input_ids, attention_mask = self._get_encoder_outputs( model, input_ids, attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) kwargs["encoder_outputs"] = encoder_outputs with torch.no_grad(): model_kwargs = {"attention_mask": attention_mask} if attention_mask is not None else {} stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)]) output_contrastive = model.contrastive_search( input_ids, stopping_criteria=stopping_criteria, logits_processor=logits_processor, output_attentions=output_attentions, output_hidden_states=output_hidden_states, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **kwargs, **model_kwargs, **contrastive_search_kwargs, ) return output_contrastive, output_generate def test_greedy_generate(self): # check `generate()` and `greedy_search()` are equal for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # test old generation output for backwards compatibility model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length ) self.assertListEqual(output_greedy.tolist(), output_generate.tolist()) def test_greedy_generate_dict_outputs(self): for model_class in self.all_generative_model_classes: # disable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = False model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_greedy, GreedySearchEncoderDecoderOutput) self.assertIsInstance(output_generate, GreedySearchEncoderDecoderOutput) else: self.assertIsInstance(output_greedy, GreedySearchDecoderOnlyOutput) self.assertIsInstance(output_generate, GreedySearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_greedy.sequences.tolist()) for output in (output_greedy, output_generate): self._check_outputs(output, input_ids, model.config) def test_greedy_generate_dict_outputs_use_cache(self): for model_class in self.all_generative_model_classes: # enable cache config, input_ids, attention_mask, max_length = 
self._get_input_ids_and_config() if not hasattr(config, "use_cache"): # only relevant if model has "use_cache" return config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() output_greedy, output_generate = self._greedy_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertListEqual(output_generate.sequences.tolist(), output_greedy.sequences.tolist()) for output in (output_greedy, output_generate): self._check_outputs(output, input_ids, model.config, use_cache=True) def test_sample_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=2) # check `generate()` and `sample()` are equal output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=1, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, ) self.assertListEqual(output_sample.tolist(), output_generate.tolist()) # check `generate()` and `sample()` yield equal results for `num_return_sequences` output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=3, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, ) self.assertListEqual(output_sample.tolist(), output_generate.tolist()) def test_sample_generate_dict_output(self): for model_class in self.all_generative_model_classes: # disable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = False model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], model.config.eos_token_id, forced_bos_token_id=model.config.forced_bos_token_id, forced_eos_token_id=model.config.forced_eos_token_id, max_length=max_length, ) logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) output_sample, output_generate = self._sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=2, logits_processor=logits_processor, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, process_kwargs=process_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_sample, SampleEncoderDecoderOutput) self.assertIsInstance(output_generate, SampleEncoderDecoderOutput) else: self.assertIsInstance(output_sample, SampleDecoderOnlyOutput) self.assertIsInstance(output_generate, SampleDecoderOnlyOutput) 
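            # both calls inside `_sample_generate` are seeded with `torch.manual_seed(0)`, so `generate()` and
            # `sample()` are expected to agree token-for-token, not just in output type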
self.assertListEqual(output_generate.sequences.tolist(), output_sample.sequences.tolist()) for output in (output_sample, output_generate): self._check_outputs(output, input_ids, model.config, num_return_sequences=2) def test_beam_search_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length) # check `generate()` and `beam_search()` are equal output_generate, output_beam_search = self._beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_process_kwargs=logits_process_kwargs, logits_processor=logits_processor, ) self.assertListEqual(output_generate.tolist(), output_beam_search.tolist()) # check `generate()` and `beam_search()` are equal for `num_return_sequences` num_return_sequences = 2 if model.config.is_encoder_decoder: max_length = 4 beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs( input_ids.shape[0], max_length, num_return_sequences=num_return_sequences ) output_generate, output_beam_search = self._beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_process_kwargs=logits_process_kwargs, logits_processor=logits_processor, ) self.assertListEqual(output_generate.tolist(), output_beam_search.tolist()) def test_beam_search_generate_dict_output(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # disable cache config.use_cache = False # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length) output_generate, output_beam_search = self._beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_process_kwargs=logits_process_kwargs, logits_processor=logits_processor, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_beam_search, 
BeamSearchEncoderDecoderOutput) self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput) else: self.assertIsInstance(output_beam_search, BeamSearchDecoderOnlyOutput) self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_beam_search.sequences.tolist()) self.assertTrue( torch.allclose(output_generate["sequences_scores"], output_beam_search["sequences_scores"], atol=1e-3) ) self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],)) self.assertTrue((output_generate["sequences_scores"] < 0).all().item()) for output in (output_beam_search, output_generate): self._check_outputs(output, input_ids, model.config, num_return_sequences=beam_scorer.num_beams) def test_beam_search_generate_dict_outputs_use_cache(self): for model_class in self.all_generative_model_classes: # enable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None if not hasattr(config, "use_cache"): # only relevant if model has "use_cache" return model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs(input_ids.shape[0], max_length) config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() output_beam, output_generate = self._beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_process_kwargs=logits_process_kwargs, logits_processor=logits_processor, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertListEqual(output_generate.sequences.tolist(), output_beam.sequences.tolist()) for output in (output_beam, output_generate): self._check_outputs( output, input_ids, model.config, use_cache=True, num_return_sequences=beam_scorer.num_beams ) def test_beam_sample_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) model = model_class(config).to(torch_device).eval() # check `generate()` and `beam_search()` are equal # change `num_return_sequences = 2` but not for `beam_scorer` num_return_sequences = 2 if model.config.is_encoder_decoder: max_length = 4 beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs( input_ids.shape[0] * num_return_sequences, max_length ) beam_kwargs["num_return_sequences"] = num_return_sequences output_generate, output_beam_sample = 
self._beam_sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=num_return_sequences, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, ) self.assertListEqual(output_generate.tolist(), output_beam_sample.tolist()) def test_beam_sample_generate_dict_output(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # disable cache config.use_cache = False # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() logits_warper_kwargs, logits_warper = self._get_warper_and_kwargs(num_beams=1) num_return_sequences = 2 if model.config.is_encoder_decoder: max_length = 4 beam_kwargs, beam_scorer = self._get_beam_scorer_and_kwargs( input_ids.shape[0] * num_return_sequences, max_length ) beam_kwargs["num_return_sequences"] = num_return_sequences output_beam_sample, output_generate = self._beam_sample_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, num_return_sequences=num_return_sequences, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_warper=logits_warper, logits_warper_kwargs=logits_warper_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_beam_sample, BeamSampleEncoderDecoderOutput) self.assertIsInstance(output_generate, BeamSampleEncoderDecoderOutput) else: self.assertIsInstance(output_beam_sample, BeamSampleDecoderOnlyOutput) self.assertIsInstance(output_generate, BeamSampleDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_beam_sample.sequences.tolist()) self.assertTrue( torch.allclose(output_generate["sequences_scores"], output_beam_sample["sequences_scores"], atol=1e-3) ) self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],)) self.assertTrue((output_generate["sequences_scores"] < 0).all().item()) for output in (output_beam_sample, output_generate): self._check_outputs( output, input_ids, model.config, num_return_sequences=num_return_sequences * beam_scorer.num_beams ) def test_generate_without_input_ids(self): config, _, _, max_length = self._get_input_ids_and_config() # if no bos token id => cannot generate from None if config.bos_token_id is None: return for model_class in self.all_generative_model_classes: model = model_class(config).to(torch_device) model.eval() output_ids_generate = model.generate(do_sample=False, max_length=max_length, remove_invalid_values=True) self.assertIsNotNone(output_ids_generate) def test_group_beam_search_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = 
model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, diversity_penalty=2.0, ) # check `generate()` and `group_beam_search()` are equal beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs(input_ids.shape[0], max_length) output_generate, output_group_beam_search = self._group_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, ) self.assertListEqual(output_generate.tolist(), output_group_beam_search.tolist()) # check `generate()` and `group_beam_search()` are equal for `num_return_sequences` num_return_sequences = 2 if model.config.is_encoder_decoder: max_length = 4 beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs( input_ids.shape[0], max_length, num_return_sequences=num_return_sequences ) output_generate, output_group_beam_search = self._group_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, ) self.assertListEqual(output_generate.tolist(), output_group_beam_search.tolist()) def test_group_beam_search_generate_dict_output(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() config.use_cache = False # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 4 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, diversity_penalty=2.0, ) num_return_sequences = 1 beam_kwargs, beam_scorer = self._get_diverse_beam_scorer_and_kwargs( input_ids.shape[0], max_length, num_return_sequences=num_return_sequences ) output_generate, output_group_beam_search = self._group_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, beam_scorer=beam_scorer, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_group_beam_search, BeamSearchEncoderDecoderOutput) self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput) else: self.assertIsInstance(output_group_beam_search, BeamSearchDecoderOnlyOutput) self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_group_beam_search.sequences.tolist()) self.assertTrue( torch.allclose( output_generate["sequences_scores"], output_group_beam_search["sequences_scores"], atol=1e-3 ) ) 
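            # `sequences_scores` are length-penalized sums of log-probabilities: one score per returned
            # sequence, and every score should be negative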
self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],)) self.assertTrue((output_generate["sequences_scores"] < 0).all().item()) for output in (output_group_beam_search, output_generate): self._check_outputs( output, input_ids, model.config, num_return_sequences=num_return_sequences * beam_scorer.num_beams ) def test_constrained_beam_search_generate(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() max_length = 20 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) # check `generate()` and `constrained_beam_search()` are equal # Sample constraints if not input_ids.dtype == torch.float32: min_id = torch.min(input_ids) + 3 max_id = torch.max(input_ids) else: # otherwise this throws an error for Speech2TextModel since its inputs are floating points min_id = 3 max_id = 100 force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0] constraints = [ PhrasalConstraint(force_tokens), ] beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs( input_ids.shape[0], max_length, constraints, num_return_sequences=1 ) output_generate, output_beam_search = self._constrained_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, constrained_beam_scorer=beam_scorer, constraints=constraints, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, ) self.assertListEqual(output_generate.tolist(), output_beam_search.tolist()) for generation_output in output_generate: self._check_sequence_inside_sequence(force_tokens, generation_output) # check `generate()` and `constrained_beam_search()` are equal for `num_return_sequences` # Sample constraints force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0] constraints = [ PhrasalConstraint(force_tokens), ] num_return_sequences = 2 max_length = 20 beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs( input_ids.shape[0], max_length, constraints, num_return_sequences=num_return_sequences ) output_generate, output_beam_search = self._constrained_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, constrained_beam_scorer=beam_scorer, constraints=constraints, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, ) self.assertListEqual(output_generate.tolist(), output_beam_search.tolist()) for generation_output in output_generate: self._check_sequence_inside_sequence(force_tokens, generation_output) def test_constrained_beam_search_generate_dict_output(self): for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # disable cache config.use_cache = False # It is important set set the eos_token_id to None to ensure that no sequences # shorter than `max_length` can be generated which could lead to flaky circle ci # 
failures if the top `num_return_sequences` beams are all shorter than the longest beam config.eos_token_id = None config.forced_eos_token_id = None model = model_class(config).to(torch_device).eval() if model.config.is_encoder_decoder: max_length = 20 logits_process_kwargs, logits_processor = self._get_logits_processor_and_kwargs( input_ids.shape[-1], config.eos_token_id, config.forced_bos_token_id, config.forced_eos_token_id, max_length, ) # Sample constraints min_id = 3 max_id = model.config.vocab_size force_tokens = torch.randint(min_id, max_id, (1, 2)).tolist()[0] constraints = [ PhrasalConstraint(force_tokens), ] beam_kwargs, beam_scorer = self._get_constrained_beam_scorer_and_kwargs( input_ids.shape[0], max_length, constraints, num_return_sequences=1 ) output_generate, output_beam_search = self._constrained_beam_search_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, constrained_beam_scorer=beam_scorer, constraints=constraints, beam_kwargs=beam_kwargs, logits_processor=logits_processor, logits_process_kwargs=logits_process_kwargs, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) if model.config.is_encoder_decoder: self.assertIsInstance(output_beam_search, BeamSearchEncoderDecoderOutput) self.assertIsInstance(output_generate, BeamSearchEncoderDecoderOutput) else: self.assertIsInstance(output_beam_search, BeamSearchDecoderOnlyOutput) self.assertIsInstance(output_generate, BeamSearchDecoderOnlyOutput) self.assertListEqual(output_generate.sequences.tolist(), output_beam_search.sequences.tolist()) self.assertTrue( torch.allclose(output_generate["sequences_scores"], output_beam_search["sequences_scores"], atol=1e-3) ) self.assertTrue(output_generate["sequences_scores"].shape == (output_generate["sequences"].shape[0],)) self.assertTrue((output_generate["sequences_scores"] < 0).all().item()) for output in (output_beam_search, output_generate): self._check_outputs(output, input_ids, model.config, num_return_sequences=beam_scorer.num_beams) def test_contrastive_generate(self): # check `generate()` and `contrastive_search()` are equal for model_class in self.all_generative_model_classes: # won't fix: FSMT and Reformer have a different cache variable type (and format). if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]): return config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # NOTE: contrastive search only works with cache on at the moment. if not hasattr(config, "use_cache"): return config.use_cache = True config.is_decoder = True # test old generation output for backwards compatibility model = model_class(config).to(torch_device).eval() output_contrastive, output_generate = self._contrastive_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length ) self.assertListEqual(output_contrastive.tolist(), output_generate.tolist()) def test_contrastive_generate_dict_outputs_use_cache(self): for model_class in self.all_generative_model_classes: # won't fix: FSMT and Reformer have a different cache variable type (and format). if any(model_name in model_class.__name__.lower() for model_name in ["fsmt", "reformer"]): return # enable cache config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() # NOTE: contrastive search only works with cache on at the moment. 
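            # models that do not expose a `use_cache` flag cannot run contrastive search, so skip them entirely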
if not hasattr(config, "use_cache"): return config.use_cache = True config.is_decoder = True model = model_class(config).to(torch_device).eval() output_contrastive, output_generate = self._contrastive_generate( model=model, input_ids=input_ids, attention_mask=attention_mask, max_length=max_length, output_scores=True, output_hidden_states=True, output_attentions=True, return_dict_in_generate=True, ) self.assertListEqual(output_generate.sequences.tolist(), output_contrastive.sequences.tolist()) for output in (output_contrastive, output_generate): self._check_outputs(output, input_ids, model.config, use_cache=True) def test_generate_with_head_masking(self): """Test designed for encoder-decoder models to ensure the attention head masking is used.""" attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"] for model_class in self.all_generative_model_classes: config, input_ids, attention_mask, max_length = self._get_input_ids_and_config() model = model_class(config).to(torch_device) # We want to test only encoder-decoder models if not config.is_encoder_decoder: continue head_masking = { "head_mask": torch.zeros(config.encoder_layers, config.encoder_attention_heads, device=torch_device), "decoder_head_mask": torch.zeros( config.decoder_layers, config.decoder_attention_heads, device=torch_device ), "cross_attn_head_mask": torch.zeros( config.decoder_layers, config.decoder_attention_heads, device=torch_device ), } signature = inspect.signature(model.forward) # We want to test only models where encoder/decoder head masking is implemented if not set(head_masking.keys()) < {*signature.parameters.keys()}: continue for attn_name, (name, mask) in zip(attention_names, head_masking.items()): out = model.generate( input_ids, attention_mask=attention_mask, num_beams=1, output_attentions=True, return_dict_in_generate=True, remove_invalid_values=True, **{name: mask}, ) # We check the state of decoder_attentions and cross_attentions just from the last step attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1] self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0) def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1): batch_size, seq_length = input_ids.shape num_sequences_in_output = batch_size * num_return_sequences gen_len = ( output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length ) # scores self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config) # Attentions if config.is_encoder_decoder: # encoder self._check_encoder_attention_for_generate(output.encoder_attentions, batch_size, config, seq_length) # decoder self._check_attentions_for_generate( num_sequences_in_output, output.decoder_attentions, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) else: # if use_cache first input is equal to no use_cache, so skip here attentions = output.attentions if not use_cache else output.attentions[1:] min_length = seq_length if not use_cache else seq_length + 1 self._check_attentions_for_generate( num_sequences_in_output, attentions=attentions, min_length=min_length, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) # Hidden States if config.is_encoder_decoder: # encoder self._check_encoder_hidden_states_for_generate( output.encoder_hidden_states, batch_size, config, seq_length ) # decoder self._check_hidden_states_for_generate( num_sequences_in_output, 
output.decoder_hidden_states, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) else: # if use_cache first input is equal to no use_cache, so skip here hidden_states = output.hidden_states if not use_cache else output.hidden_states[1:] min_length = seq_length if not use_cache else seq_length + 1 self._check_hidden_states_for_generate( num_sequences_in_output, hidden_states, min_length=min_length, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) def _check_scores(self, batch_size, scores, length, config): expected_shape = (batch_size, config.vocab_size) self.assertIsInstance(scores, tuple) self.assertEqual(len(scores), length) self.assertListEqual([iter_scores.shape for iter_scores in scores], [expected_shape] * len(scores)) def _check_attentions_for_generate( self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(attentions, tuple) self.assertListEqual( [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions) ) self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups) for idx, iter_attentions in enumerate(attentions): tgt_len = min_length + idx if not use_cache else 1 src_len = min_length + idx expected_shape = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions) ) def _check_encoder_attention_for_generate(self, attentions, batch_size, config, seq_length): encoder_expected_shape = (batch_size, config.num_attention_heads, seq_length, seq_length) self.assertIsInstance(attentions, tuple) self.assertListEqual( [layer_attentions.shape for layer_attentions in attentions], [encoder_expected_shape] * len(attentions), ) def _check_hidden_states_for_generate( self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(hidden_states, tuple) self.assertListEqual( [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states], [True] * len(hidden_states), ) self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups) for idx, iter_hidden_states in enumerate(hidden_states): seq_len = min_length + idx if not use_cache else 1 expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(iter_hidden_states), ) def _check_encoder_hidden_states_for_generate(self, hidden_states, batch_size, config, seq_length): encoder_expected_shape = (batch_size, seq_length, config.hidden_size) self.assertIsInstance(hidden_states, tuple) self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in hidden_states], [encoder_expected_shape] * len(hidden_states), ) def _check_sequence_inside_sequence(self, tensor_1, tensor_2): # check if tensor_1 inside tensor_2 or tensor_2 inside tensor_1. # set to same device. we don't care what device. 
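        # naive sliding-window scan: slide the shorter sequence over the longer one and
        # succeed as soon as an exact contiguous match is found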
if not isinstance(tensor_1, list): tensor_1 = tensor_1.cpu().tolist() if not isinstance(tensor_2, list): tensor_2 = tensor_2.cpu().tolist() in_order = len(tensor_1) <= len(tensor_2) longer = tensor_2 if in_order else tensor_1 shorter = tensor_1 if in_order else tensor_2 flag = False chunk_size = len(shorter) for chunk_idx in range(len(longer) - chunk_size + 1): subseq = longer[chunk_idx : chunk_idx + chunk_size] if subseq == shorter: flag = True break self.assertTrue(flag) @require_torch class UtilsFunctionsTest(unittest.TestCase): # tests whether the top_k_top_p function behaves as expected def test_top_k_top_p_filtering(self): logits = torch.tensor( [ [ 8.2220991, # 3rd highest value; idx. 0 -0.5620044, 5.23229752, 4.0386393, -6.8798378, -0.54785802, -3.2012153, 2.92777176, 1.88171953, 7.35341276, 8.43207833, # 2nd highest value; idx. 10 -9.85711836, -5.96209236, -1.13039161, -7.1115294, -0.8369633, -5.3186408, 7.06427407, 0.81369344, -0.82023817, -5.9179796, 0.58813443, -6.99778438, 4.71551189, -0.18771637, 7.44020759, # 4th highest value; idx. 25 9.38450987, # 1st highest value; idx. 26 2.12662941, -9.32562038, 2.35652522, ], # cummulative prob of 4 highest values <= 0.6 [ 0.58425518, 4.53139238, -5.57510464, -6.28030699, -7.19529503, -4.02122551, 1.39337037, -6.06707057, 1.59480517, -9.643119, 0.03907799, 0.67231762, -8.88206726, 6.27115922, # 4th highest value; idx. 13 2.28520723, 4.82767506, 4.30421368, 8.8275313, # 2nd highest value; idx. 17 5.44029958, -4.4735794, 7.38579536, # 3rd highest value; idx. 20 -2.91051663, 2.61946077, -2.5674762, -9.48959302, -4.02922645, -1.35416918, 9.67702323, # 1st highest value; idx. 27 -5.89478553, 1.85370467, ], # cummulative prob of 4 highest values <= 0.6 ], dtype=torch.float, device=torch_device, ) non_inf_expected_idx = torch.tensor( [[0, 0], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 20], [1, 27]], dtype=torch.long, device=torch_device, ) # expected non filtered idx as noted above non_inf_expected_output = torch.tensor( [ 8.2221, 8.4321, 7.4402, 9.3845, 6.2712, 8.8275, 7.3858, 9.6770, ], # expected non filtered values as noted above dtype=torch.float, device=torch_device, ) output = top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4) non_inf_output = output[output != -float("inf")].to(device=torch_device) non_inf_idx = (output != -float("inf")).nonzero().to(device=torch_device) self.assertTrue(torch.allclose(non_inf_expected_output, non_inf_output, atol=1e-12)) self.assertTrue(torch.all(torch.eq(non_inf_expected_idx, non_inf_idx))) # tests whether the function uses filter_value instead of default -inf def test_top_k_top_p_filtering_with_filter_value(self): logits = torch.tensor( [ [ 1, 1, 1, 0.99, # get filtered by top-p filtering 0.98, # get filtered by top-k filtering ] ], dtype=torch.float, device=torch_device, ) expected_output = torch.tensor( [[1, 1, 1, 0, 0]], dtype=torch.float, device=torch_device, ) output = top_k_top_p_filtering(logits, top_k=4, top_p=0.5, filter_value=0.0) self.assertTrue(torch.allclose(expected_output, output, atol=1e-12)) @require_torch class GenerationIntegrationTests(unittest.TestCase, GenerationIntegrationTestsMixin): # setting framework_dependent_parameters needs to be gated, just like its contents' imports if is_torch_available(): framework_dependent_parameters = { "AutoModelForCausalLM": AutoModelForCausalLM, "AutoModelForSpeechSeq2Seq": AutoModelForSpeechSeq2Seq, "AutoModelForSeq2SeqLM": AutoModelForSeq2SeqLM, "AutoModelForVision2Seq": AutoModelForVision2Seq, 
"LogitsProcessorList": LogitsProcessorList, "MinLengthLogitsProcessor": MinLengthLogitsProcessor, "create_tensor_fn": torch.tensor, "floats_tensor": floats_tensor, "return_tensors": "pt", } @slow def test_diverse_beam_search(self): # PT-only test: TF doesn't have a diverse beam search implementation article = """Justin Timberlake and Jessica Biel, welcome to parenthood. The celebrity couple announced the arrival of their son, Silas Randall Timberlake, in statements to People. "Silas was the middle name of Timberlake's maternal grandfather Bill Bomar, who died in 2012, while Randall is the musician's own middle name, as well as his father's first," People reports. The couple announced the pregnancy in January, with an Instagram post. It is the first baby for both.""" bart_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn") bart_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn").to(torch_device) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) outputs = bart_model.generate( input_ids, num_beams=4, num_return_sequences=2, num_beam_groups=4, diversity_penalty=2.0, remove_invalid_values=True, ) generated_text = bart_tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The couple announced the birth of their son, Silas Randall Timberlake, in a statement. Silas was the" " middle name of Timberlake's maternal grandfather Bill Bomar. Randall is the musician's own middle" " name, as well as his father's first. It is the first baby for both of them.", "Justin Timberlake and Jessica Biel have a son. The baby is named Silas Randall Timberlake. It is the" " first child for both. The couple announced the pregnancy in January. The name Silas is the middle" " name of Timberlake's maternal grandfather. 
It's also his own middle name.", ], ) def test_max_length_backward_compat_greedy(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) max_length = 20 input_ids = input_ids.expand(2, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids = bart_model._prepare_decoder_input_ids_for_generation( input_ids.shape[0], decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) with self.assertWarns(UserWarning): bart_model.greedy_search( input_ids, max_length=max_length, pad_token_id=bart_model.config.pad_token_id, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) def test_max_length_backward_compat_sample(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) max_length = 20 input_ids = input_ids.expand(2, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids = bart_model._prepare_decoder_input_ids_for_generation( input_ids.shape[0], decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) with torch.no_grad(): with self.assertWarns(UserWarning): bart_model.sample( input_ids, max_length=max_length, pad_token_id=bart_model.config.pad_token_id, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) def test_max_length_backward_compat_beam_search(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) batch_size = 1 max_length = 20 num_beams = 2 input_ids = input_ids.expand(2, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids = bart_model._prepare_decoder_input_ids_for_generation( input_ids.shape[0], decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, ) with self.assertWarns(UserWarning): _ = bart_model.beam_search( input_ids, num_beams=num_beams, max_length=max_length, beam_scorer=beam_scorer, **model_kwargs ) def test_max_length_backward_compat_group_beam_search(self): # PT-only test: TF doesn't have StoppingCriteria & group beam search article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( 
torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) batch_size = 1 max_length = 20 num_beams = 6 num_beam_groups = 3 num_return_sequences = num_beams * batch_size input_ids = input_ids.expand(6, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids = bart_model._prepare_decoder_input_ids_for_generation( input_ids.shape[0], decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) diverse_beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=num_beam_groups, ) with self.assertWarns(UserWarning): bart_model.group_beam_search( input_ids, diverse_beam_scorer, num_beams=num_beams, max_length=max_length, **model_kwargs ) def test_max_length_warning_if_different(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) batch_size = 1 max_length = 20 num_beams = 6 num_beam_groups = 3 num_return_sequences = num_beams * batch_size stopping_criteria_max_length = 18 stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=stopping_criteria_max_length)]) # Greedy input_ids = input_ids.expand(6, -1) model_kwargs = bart_model._prepare_encoder_decoder_kwargs_for_generation(input_ids, {}) input_ids = bart_model._prepare_decoder_input_ids_for_generation( input_ids.shape[0], decoder_start_token_id=bart_model.config.decoder_start_token_id, bos_token_id=bart_model.config.bos_token_id, ) with self.assertWarns(UserWarning): bart_model.greedy_search( input_ids, max_length=max_length, pad_token_id=bart_model.config.pad_token_id, stopping_criteria=stopping_criteria, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) # Sample with self.assertWarns(UserWarning): with torch.no_grad(): bart_model.sample( input_ids, max_length=max_length, stopping_criteria=stopping_criteria, pad_token_id=bart_model.config.pad_token_id, eos_token_id=bart_model.config.eos_token_id, **model_kwargs, ) # Beam beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, ) with self.assertWarns(UserWarning): with torch.no_grad(): bart_model.beam_search( input_ids, num_beams=num_beams, stopping_criteria=stopping_criteria, max_length=max_length, beam_scorer=beam_scorer, **model_kwargs, ) # Grouped beam search diverse_beam_scorer = BeamSearchScorer( batch_size=batch_size, num_beams=num_beams, device=torch_device, num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=num_beam_groups, ) with self.assertWarns(UserWarning): bart_model.group_beam_search( input_ids, diverse_beam_scorer, stopping_criteria=stopping_criteria, num_beams=num_beams, max_length=max_length, **model_kwargs, ) def test_custom_stopping_criteria_overload_error(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random") bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device) input_ids = bart_tokenizer(article, 
return_tensors="pt").input_ids.to(torch_device) stopping_criteria = StoppingCriteriaList() stopping_criteria.append(MaxLengthCriteria(max_length=42)) with self.assertRaises(ValueError): bart_model.generate(input_ids, stopping_criteria=stopping_criteria) with self.assertRaises(ValueError): bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=32) def test_custom_stopping_criteria(self): # PT-only test: TF doesn't have StoppingCriteria article = """Justin Timberlake and Jessica Biel, welcome to parenthood.""" bart_tokenizer = BartTokenizer.from_pretrained("sshleifer/bart-tiny-random") bart_model = BartForConditionalGeneration.from_pretrained("sshleifer/bart-tiny-random").to(torch_device) input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) class DummyCriteria(StoppingCriteria): def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: return input_ids.shape[-1] >= 20 stopping_criteria = StoppingCriteriaList() stopping_criteria.append(DummyCriteria()) self.assertEqual( list(bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=22).shape), [1, 20], ) self.assertEqual( list(bart_model.generate(input_ids, stopping_criteria=stopping_criteria, max_length=18).shape), [1, 18], ) def test_stop_sequence_stopping_criteria(self): # PT-only test: TF doesn't have StoppingCriteria prompt = """Hello I believe in""" generator = pipeline("text-generation", model="hf-internal-testing/tiny-random-bart") output = generator(prompt) self.assertEqual( output, [ { "generated_text": ( "Hello I believe in in in number number number number number number number number number" ) } ], ) output = generator(prompt, stop_sequence=" number") self.assertEqual(output, [{"generated_text": "Hello I believe in in in number"}]) def test_generate_non_nlp_input_ids_as_kwarg(self): # PT-only test: AFAIK there's no non-NLP model architecture in TF that supports `input_ids` as its only input model = ImageGPTForCausalImageModeling.from_pretrained( "hf-internal-testing/tiny-random-imagegpt", max_length=10 ).to(torch_device) input_ids = ids_tensor((3, 5), vocab_size=10) output_sequences_kwargs = model.generate(input_ids=input_ids).cpu() output_sequences = model.generate(input_ids).cpu() self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist()) self.assertEqual(output_sequences.shape, (3, 10)) def test_generate_input_values_as_encoder_kwarg(self): # PT-only test: AFAIK there's no generate-capable architecture in TF that supports `input_values` as its input input_values = floats_tensor((2, 250)) model = SpeechEncoderDecoderModel.from_pretrained("hf-internal-testing/tiny-random-speech-encoder-decoder") model = model.to(torch_device) output_sequences_kwargs = model.generate(input_values=input_values, max_length=5).cpu() output_sequences = model.generate(input_values, max_length=5).cpu() self.assertListEqual(output_sequences.tolist(), output_sequences_kwargs.tolist()) self.assertEqual(output_sequences.shape, (2, 5)) def test_transition_scores_group_beam_search_encoder_decoder(self): # PT-only test: TF doesn't have group beam search articles = [ "Justin Timberlake and Jessica Biel, welcome to parenthood.", "Michael Phelps is arguably the most decorated Olympian of all time.", ] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained( "hf-internal-testing/tiny-random-bart", max_length=10, num_beams=2, num_beam_groups=2, 
num_return_sequences=2, eos_token_id=None, return_dict_in_generate=True, output_scores=True, length_penalty=0.0, ) model = model.to(torch_device) input_ids = tokenizer(articles, return_tensors="pt", padding=True).input_ids.to(torch_device) outputs = model.generate(input_ids=input_ids) transition_scores = model.compute_transition_scores(outputs.sequences, outputs.scores, outputs.beam_indices) transition_scores_sum = transition_scores.sum(-1) self.assertTrue(torch.allclose(transition_scores_sum, outputs.sequences_scores, atol=1e-3)) @slow def test_beam_search_example_integration(self): # PT-only test: TF doesn't have a BeamSearchScorer # exactly the example provided in the docstrings of beam search, which previously # failed after directly copying from it. Refer to PR #15555 tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # lets run beam search using 3 beams num_beams = 3 # define decoder start token ids input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) input_ids = input_ids * model.config.decoder_start_token_id # add encoder_outputs to model keyword arguments model_kwargs = { "encoder_outputs": model.get_encoder()( encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ) } # instantiate beam scorer beam_scorer = BeamSearchScorer( batch_size=1, num_beams=num_beams, device=model.device, ) # instantiate logits processors logits_processor = LogitsProcessorList( [ MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ] ) outputs = model.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(outputs, ["Wie alt bist du?"]) @slow def test_constrained_beam_search(self): # PT-only test: TF doesn't have constrained beam search model = GPT2LMHeadModel.from_pretrained("gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") force_tokens = tokenizer("scared", add_prefix_space=True, add_special_tokens=False).input_ids force_tokens_2 = tokenizer("big weapons", add_prefix_space=True, add_special_tokens=False).input_ids constraints = [ PhrasalConstraint(force_tokens), PhrasalConstraint(force_tokens_2), ] starting_text = ["The soldiers were not prepared and"] input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device) outputs = model.generate( input_ids, constraints=constraints, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, max_length=30, remove_invalid_values=True, ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The soldiers were not prepared and didn't know what to do. 
They had no idea how they would react if" " the enemy attacked them, big weapons scared" ], ) @slow def test_constrained_beam_search_mixed(self): # PT-only test: TF doesn't have constrained beam search model = GPT2LMHeadModel.from_pretrained("gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") force_phrase = tokenizer("scared", add_prefix_space=True, add_special_tokens=False).input_ids flexible_phrases = tokenizer( ["scream", "screams", "screaming", "screamed"], add_prefix_space=True, add_special_tokens=False ).input_ids constraints = [ PhrasalConstraint(force_phrase), DisjunctiveConstraint(flexible_phrases), ] starting_text = ["The soldiers", "The child"] input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device) outputs = model.generate( input_ids, constraints=constraints, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, # max_length=20, remove_invalid_values=True, ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The soldiers, who had been stationed at the base for more than a year before being evacuated" " screaming scared", "The child was taken to a local hospital where he died.\n 'I don't think screaming scared", ], ) @slow def test_constrained_beam_search_mixed_mixin(self): # PT-only test: TF doesn't have constrained beam search model = GPT2LMHeadModel.from_pretrained("gpt2").to(torch_device) tokenizer = GPT2Tokenizer.from_pretrained("gpt2") force_word = "scared" force_flexible = ["scream", "screams", "screaming", "screamed"] force_words_ids = [ tokenizer([force_word], add_prefix_space=True, add_special_tokens=False).input_ids, tokenizer(force_flexible, add_prefix_space=True, add_special_tokens=False).input_ids, ] starting_text = ["The soldiers", "The child"] input_ids = tokenizer(starting_text, return_tensors="pt").input_ids.to(torch_device) outputs = model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) generated_text = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual( generated_text, [ "The soldiers, who had been stationed at the base for more than a year before being evacuated" " screaming scared", "The child was taken to a local hospital where he died.\n 'I don't think screaming scared", ], ) @slow def test_constrained_beam_search_example_translation_mixin(self): # PT-only test: TF doesn't have constrained beam search tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" force_words = ["sind"] input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids force_words_ids = tokenizer(force_words, add_special_tokens=False).input_ids outputs = model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(outputs, ["Wie alt sind Sie?"]) @slow def test_constrained_beam_search_example_integration(self): # PT-only test: TF doesn't have constrained beam search tokenizer = AutoTokenizer.from_pretrained("t5-base") model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") encoder_input_str = "translate English to German: How old are you?" 
encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids # lets run beam search using 5 beams num_beams = 5 # define decoder start token ids input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) input_ids = input_ids * model.config.decoder_start_token_id # add encoder_outputs to model keyword arguments model_kwargs = { "encoder_outputs": model.get_encoder()( encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True ) } constraint_str = "sind" constraint_token_ids = tokenizer.encode(constraint_str)[:-1] # remove eos token constraints = [PhrasalConstraint(token_ids=constraint_token_ids)] # instantiate beam scorer beam_scorer = ConstrainedBeamSearchScorer( batch_size=1, num_beams=num_beams, device=model.device, constraints=constraints ) # instantiate logits processors logits_processor = LogitsProcessorList( [ MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ] ) outputs = model.constrained_beam_search( input_ids, beam_scorer, constraints=constraints, logits_processor=logits_processor, **model_kwargs ) outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True) self.assertListEqual(outputs, ["Wie alt sind Sie?"]) def test_constrained_beam_search_mixin_type_checks(self): # PT-only test: TF doesn't have constrained beam search tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/t5-tiny-random") model = AutoModelForSeq2SeqLM.from_pretrained("patrickvonplaten/t5-tiny-random") encoder_input_str = "translate English to German: How old are you?" input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids with self.assertRaises(ValueError): force_words = ["sind"] force_words_ids = tokenizer(force_words, return_tensors="pt").input_ids model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) with self.assertRaises(ValueError): force_words = ["sind"] force_words_ids = [tokenizer(force_words, return_tensors="pt").input_ids] model.generate( input_ids, force_words_ids=force_words_ids, num_beams=10, num_return_sequences=1, no_repeat_ngram_size=1, remove_invalid_values=True, ) with self.assertRaises(ValueError): model.generate(input_ids, force_words_ids=[]) with self.assertRaises(ValueError): model.generate(input_ids, force_words_ids=[[-1]]) with self.assertRaises(ValueError): model.generate(input_ids, force_words_ids=[[[-1]]]) def test_contrastive_search_batched(self): # PT-only test: TF doesn't have constrained beam search # Tests that contrastive search works with batched inputs (i.e. 
has the same output as for non-batched inputs) articles = ["Foo", "Bar Baz"] tokenizer = BartTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to(torch_device) model.config.eos_token_id = None input_ids_batched = tokenizer(articles, padding=True, return_tensors="pt").input_ids.to(torch_device) input_ids = tokenizer(articles[1], return_tensors="pt").input_ids.to(torch_device) output_sequences_batched = model.generate( input_ids=input_ids_batched, penalty_alpha=0.6, top_k=4, return_dict_in_generate=True, output_scores=True ) output_sequences = model.generate( input_ids=input_ids, penalty_alpha=0.6, top_k=4, return_dict_in_generate=True, output_scores=True ) batched_out = tokenizer.decode(output_sequences_batched.sequences[1], skip_special_tokens=True) out = tokenizer.decode(output_sequences.sequences[0], skip_special_tokens=True) self.assertEqual(batched_out, out) # output_sequences_batched.scores[0][1] -> 1st set of logits, 2nd sequence max_score_diff = (output_sequences_batched.scores[0][1] - output_sequences.scores[0][0]).abs().max() self.assertTrue(max_score_diff < 1e-5) def test_eos_token_id_int_and_list_top_k_top_sampling(self): # Has TF equivalent: this test relies on random sampling generation_kwargs = { "do_sample": True, "num_beams": 1, "top_p": 0.7, "top_k": 10, "temperature": 0.7, } expectation = 15 tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") text = """Hello, my dog is cute and""" tokens = tokenizer(text, return_tensors="pt").to(torch_device) model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) torch.manual_seed(0) eos_token_id = 846 generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) torch.manual_seed(0) eos_token_id = [846, 198] generated_tokens = model.generate(**tokens, eos_token_id=eos_token_id, **generation_kwargs) self.assertTrue(expectation == len(generated_tokens[0])) def test_generate_from_inputs_embeds_decoder_only(self): # PT-only test: TF doesn't have a model with support to generate from input embeds (yet ;)) # Note: the model must support generation from input embeddings model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device) tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2") model.config.pad_token_id = tokenizer.eos_token_id text = "Hello world" tokenized_inputs = tokenizer([text, text], return_tensors="pt") input_ids = tokenized_inputs.input_ids.to(torch_device) # Traditional way of generating text outputs_from_ids = model.generate(input_ids) self.assertEqual(outputs_from_ids.shape, (2, 20)) # Same thing, but from input embeddings inputs_embeds = model.transformer.wte(input_ids) outputs_from_embeds = model.generate(input_ids, inputs_embeds=inputs_embeds) self.assertListEqual(outputs_from_ids.tolist(), outputs_from_embeds.tolist()) # But if we pass different inputs_embeds, we should get different outputs torch.manual_seed(0) random_embeds = torch.rand_like(inputs_embeds) outputs_from_rand_embeds = model.generate(input_ids, inputs_embeds=random_embeds) with self.assertRaises(AssertionError): self.assertListEqual(outputs_from_rand_embeds.tolist(), outputs_from_embeds.tolist()) # input_ids is not a required input -- if we don't pass it, the newly generated tokens will be the same 
outputs_from_embeds_wo_ids = model.generate( inputs_embeds=inputs_embeds, max_new_tokens=20 - inputs_embeds.shape[1] ) self.assertListEqual( outputs_from_embeds[:, inputs_embeds.shape[1] :].tolist(), outputs_from_embeds_wo_ids[:, 1:].tolist(), ) def test_model_kwarg_encoder_signature_filtering(self): # Has TF equivalent: ample use of framework-specific code bart_tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bart") article = """Hugging Face is a technology company based in New York and Paris.""" input_ids = bart_tokenizer(article, return_tensors="pt").input_ids.to(torch_device) bart_model = BartForConditionalGeneration.from_pretrained("hf-internal-testing/tiny-random-bart").to( torch_device ) output = bart_model.generate(input_ids).cpu().numpy() # Let's create a fake model that has a different signature. In particular, this fake model accepts "foo" as an # argument. Because "foo" is not in the encoder signature and doesn't start with "decoder_", it will be part of # the encoder kwargs prior to signature filtering, which would lead to an exception. But filtering kicks in and # saves the day. class FakeBart(BartForConditionalGeneration): def forward(self, input_ids, foo=None, **kwargs): return super().forward(input_ids, **kwargs) bart_model = FakeBart.from_pretrained("hf-internal-testing/tiny-random-bart").to(torch_device) fake_output = bart_model.generate(input_ids, foo="bar").cpu().numpy() self.assertTrue(np.array_equal(output, fake_output)) # Encoder signature filtering only kicks in if it doesn't accept wildcard kwargs. The following test will fail # because it doesn't do signature filtering. class FakeEncoder(bart_model.model.encoder.__class__): def forward(self, input_ids, **kwargs): return super().forward(input_ids, **kwargs) fake_encoder = FakeEncoder(bart_model.config, bart_model.model.shared).to(torch_device) bart_model.model.encoder = fake_encoder # Normal generation still works (the output will be different because the encoder weights are different) fake_output = bart_model.generate(input_ids).cpu().numpy() with self.assertRaises(TypeError): # FakeEncoder.forward() accepts **kwargs -> no filtering -> type error due to unexpected input "foo" bart_model.generate(input_ids, foo="bar")
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
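To make the intent of that description concrete, here is a minimal, hedged sketch (not taken from the PR's actual diff; the tensor names and values are illustrative) of the replacement it describes: rewriting tensor floor division expressed with `//` as an explicit `torch.div(..., rounding_mode="floor")` call.

```python
import torch

# Illustrative only: the kind of expression the PR rewrites, e.g. mapping
# token positions to block indices with integer (floor) division.
positions = torch.arange(10)
block_size = 4

# Old style -- triggered the __floordiv__ deprecation warning on the
# PyTorch versions this PR targets:
# block_ids = positions // block_size

# Preferred replacement, explicit about the rounding mode:
block_ids = torch.div(positions, block_size, rounding_mode="floor")

print(block_ids)  # tensor([0, 0, 0, 0, 1, 1, 1, 1, 2, 2])
```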
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/mobilevit/test_image_processing_mobilevit.py
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class MobileViTImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True, ): size = size if size is not None else {"shortest_edge": 20} crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_flip_channel_order = do_flip_channel_order def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = MobileViTImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = MobileViTImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "center_crop")) self.assertTrue(hasattr(image_processing, "do_flip_channel_order")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 20}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) def test_batch_feature(self): pass def test_call_pil(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = prepare_image_inputs(self.image_processor_tester, 
equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) def test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) def test_call_pytorch(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), )
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class MobileViTImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_flip_channel_order=True, ): size = size if size is not None else {"shortest_edge": 20} crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_flip_channel_order = do_flip_channel_order def prepare_image_processor_dict(self): return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = MobileViTImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = MobileViTImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) self.assertTrue(hasattr(image_processing, "do_center_crop")) self.assertTrue(hasattr(image_processing, "center_crop")) self.assertTrue(hasattr(image_processing, "do_flip_channel_order")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"shortest_edge": 20}) self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84) self.assertEqual(image_processor.size, {"shortest_edge": 42}) self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84}) def test_batch_feature(self): pass def test_call_pil(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = prepare_image_inputs(self.image_processor_tester, 
equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) def test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) def test_call_pytorch(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size["height"], self.image_processor_tester.crop_size["width"], ), )
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/bert/test_modeling_flax_bert.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import BertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.bert.modeling_flax_bert import ( FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, ) class FlaxBertModelTester(unittest.TestCase): def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_choices = num_choices def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) config = BertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, token_type_ids, attention_mask = config_and_inputs inputs_dict = 
{"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def prepare_config_and_inputs_for_decoder(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, token_type_ids, attention_mask = config_and_inputs config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) @require_flax class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase): test_head_masking = True all_model_classes = ( ( FlaxBertModel, FlaxBertForPreTraining, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForQuestionAnswering, FlaxBertForNextSentencePrediction, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertForQuestionAnswering, ) if is_flax_available() else () ) def setUp(self): self.model_tester = FlaxBertModelTester(self) @slow def test_model_from_pretrained(self): # Only check this for base model, not necessary for all model classes. # This will also help speed-up tests. model = FlaxBertModel.from_pretrained("bert-base-cased") outputs = model(np.ones((1, 1))) self.assertIsNotNone(outputs)
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers import BertConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): from transformers.models.bert.modeling_flax_bert import ( FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForNextSentencePrediction, FlaxBertForPreTraining, FlaxBertForQuestionAnswering, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertModel, ) class FlaxBertModelTester(unittest.TestCase): def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_choices = num_choices def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) config = BertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, attention_mask def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, token_type_ids, attention_mask = config_and_inputs inputs_dict = 
{"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask} return config, inputs_dict def prepare_config_and_inputs_for_decoder(self): config_and_inputs = self.prepare_config_and_inputs() config, input_ids, token_type_ids, attention_mask = config_and_inputs config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, ) @require_flax class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase): test_head_masking = True all_model_classes = ( ( FlaxBertModel, FlaxBertForPreTraining, FlaxBertForMaskedLM, FlaxBertForMultipleChoice, FlaxBertForQuestionAnswering, FlaxBertForNextSentencePrediction, FlaxBertForSequenceClassification, FlaxBertForTokenClassification, FlaxBertForQuestionAnswering, ) if is_flax_available() else () ) def setUp(self): self.model_tester = FlaxBertModelTester(self) @slow def test_model_from_pretrained(self): # Only check this for base model, not necessary for all model classes. # This will also help speed-up tests. model = FlaxBertModel.from_pretrained("bert-base-cased") outputs = model(np.ones((1, 1))) self.assertIsNotNone(outputs)
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/test_modeling_flax_common.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import json import random import tempfile import unittest from typing import List, Tuple import numpy as np from huggingface_hub import HfFolder, delete_repo, set_access_token from requests.exceptions import HTTPError import transformers from transformers import BertConfig, is_flax_available, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import ( TOKEN, USER, CaptureLogger, is_pt_flax_cross_test, is_staging_test, require_flax, torch_device, ) from transformers.utils import CONFIG_NAME, GENERATION_CONFIG_NAME, logging from transformers.utils.generic import ModelOutput if is_flax_available(): import os import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict from transformers import ( FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, FLAX_MODEL_MAPPING, FlaxAutoModel, FlaxAutoModelForSequenceClassification, FlaxBertModel, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.modeling_flax_utils import FLAX_WEIGHTS_INDEX_NAME, FLAX_WEIGHTS_NAME os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12" # assumed parallelism: 8 if is_torch_available(): import torch def _config_zero_init(config): configs_no_init = copy.deepcopy(config) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key: setattr(configs_no_init, key, 1e-10) return configs_no_init def ids_tensor(shape, vocab_size, rng=None): """Creates a random int32 tensor of the shape within the vocab size.""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) output = np.array(values, dtype=jnp.int32).reshape(shape) return output def floats_tensor(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.random() * scale) return np.array(values, dtype=jnp.float32).reshape(shape) def random_attention_mask(shape, rng=None): attn_mask = ids_tensor(shape, vocab_size=2, rng=rng) # make sure that at least one token is attended to for each batch attn_mask[:, -1] = 1 return attn_mask def get_params(params, from_head_prefix=None): """Function extracts relevant parameters into flatten dict from model params, appends batch normalization statistics if present""" # If Both parameters and batch normalization statistics are present if "batch_stats" in params: # Extract only parameters for the specified head prefix (if specified) and add batch statistics if from_head_prefix is 
not None: extracted_params = flatten_dict(unfreeze(params["params"][from_head_prefix])) extracted_params.update(flatten_dict(params["batch_stats"][from_head_prefix])) else: extracted_params = flatten_dict(unfreeze(params["params"])) extracted_params.update(flatten_dict(params["batch_stats"])) # Only parameters are present else: if from_head_prefix is not None: extracted_params = flatten_dict(unfreeze(params[from_head_prefix])) else: extracted_params = flatten_dict(unfreeze(params)) return extracted_params @require_flax class FlaxModelTesterMixin: model_tester = None all_model_classes = () test_mismatched_shapes = True is_encoder_decoder = False test_head_masking = False has_attentions = True def _prepare_for_class(self, inputs_dict, model_class): inputs_dict = copy.deepcopy(inputs_dict) # hack for now until we have AutoModel classes if "ForMultipleChoice" in model_class.__name__: inputs_dict = { k: jnp.broadcast_to(v[:, None], (v.shape[0], self.model_tester.num_choices, v.shape[-1])) if isinstance(v, (jnp.ndarray, np.ndarray)) else v for k, v in inputs_dict.items() } return inputs_dict def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assert_almost_equals(jnp.nan_to_num(tuple_object), jnp.nan_to_num(dict_object), 1e-5) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) # (Copied from tests.test_modeling_common.ModelTesterMixin.check_pt_flax_outputs) def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None): """ Args: model_class: The class of the model that is currently testing. For example, ..., etc. Currently unused, but it could make debugging easier and faster. names: A string, or a list of strings. These specify what fx_outputs/pt_outputs represent in the model outputs. Currently unused, but in the future, we could use this information to make the error message clearer by giving the name(s) of the output tensor(s) with large difference(s) between PT and Flax. """ self.assertEqual(type(name), str) if attributes is not None: self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`") # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`). 
if isinstance(fx_outputs, ModelOutput): self.assertTrue( isinstance(pt_outputs, ModelOutput), f"{name}: `pt_outputs` should an instance of `ModelOutput` when `fx_outputs` is", ) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys, f"{name}: Output keys differ between Flax and PyTorch") # convert to the case of `tuple` # appending each key to the current (string) `name` attributes = tuple([f"{name}.{k}" for k in fx_keys]) self.check_pt_flax_outputs( fx_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes ) # Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.) elif type(fx_outputs) in [tuple, list]: self.assertEqual( type(fx_outputs), type(pt_outputs), f"{name}: Output types differ between Flax and PyTorch" ) self.assertEqual( len(fx_outputs), len(pt_outputs), f"{name}: Output lengths differ between Flax and PyTorch" ) if attributes is not None: # case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`) self.assertEqual( len(attributes), len(fx_outputs), f"{name}: The tuple `attributes` should have the same length as `fx_outputs`", ) else: # case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `name` attributes = tuple([f"{name}_{idx}" for idx in range(len(fx_outputs))]) for fx_output, pt_output, attr in zip(fx_outputs, pt_outputs, attributes): self.check_pt_flax_outputs(fx_output, pt_output, model_class, tol=tol, name=attr) elif isinstance(fx_outputs, jnp.ndarray): self.assertTrue( isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should a tensor when `fx_outputs` is" ) # Using `np.asarray` gives `ValueError: assignment destination is read-only` at the line `fx_outputs[fx_nans] = 0`. fx_outputs = np.array(fx_outputs) pt_outputs = pt_outputs.detach().to("cpu").numpy() self.assertEqual( fx_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between Flax and PyTorch" ) # deal with NumPy's scalars to make replacing nan values by 0 work. if np.isscalar(fx_outputs): fx_outputs = np.array([fx_outputs]) pt_outputs = np.array([pt_outputs]) fx_nans = np.isnan(fx_outputs) pt_nans = np.isnan(pt_outputs) pt_outputs[fx_nans] = 0 fx_outputs[fx_nans] = 0 pt_outputs[pt_nans] = 0 fx_outputs[pt_nans] = 0 max_diff = np.amax(np.abs(fx_outputs - pt_outputs)) self.assertLessEqual( max_diff, tol, f"{name}: Difference between PyTorch and Flax is {max_diff} (>= {tol})." ) else: raise ValueError( "`fx_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `jnp.ndarray`. Got" f" {type(fx_outputs)} instead." ) @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): # It might be better to put this inside the for loop below (because we modify the config there). # But logically, it is fine. 
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions # prepare inputs prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist(), device=torch_device) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. pt_model.config.use_cache = False fx_model = model_class(config, dtype=jnp.float32) fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**prepared_inputs_dict) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict) fx_keys = tuple([k for k, v in fx_outputs_loaded.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs_loaded, pt_outputs, model_class) @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions # prepare inputs prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist(), device=torch_device) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. 
pt_model.config.use_cache = False fx_model = model_class(config, dtype=jnp.float32) pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) # make sure weights are tied in PyTorch pt_model.tie_weights() # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**prepared_inputs_dict) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True) # send pytorch model to the correct device pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs_loaded.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs_loaded, model_class) def test_from_pretrained_save_pretrained(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): model = model_class(config) prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) outputs = model(**prepared_inputs_dict).to_tuple() # verify that normal save_pretrained works as expected with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # the config file (and the generation config file, if it can generate) should be saved self.assertTrue(os.path.exists(os.path.join(tmpdirname, CONFIG_NAME))) self.assertEqual( model.can_generate(), os.path.exists(os.path.join(tmpdirname, GENERATION_CONFIG_NAME)) ) model_loaded = model_class.from_pretrained(tmpdirname) outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple() for output_loaded, output in zip(outputs_loaded, outputs): self.assert_almost_equals(output_loaded, output, 1e-3) # verify that save_pretrained for distributed training # with `params=params` works as expected with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=model.params) model_loaded = model_class.from_pretrained(tmpdirname) outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple() for output_loaded, output in zip(outputs_loaded, outputs): self.assert_almost_equals(output_loaded, output, 1e-3) def test_save_load_from_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = get_params(model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname) base_param_from_head = get_params(head_model.params, from_head_prefix=head_model.base_model_prefix) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_save_load_to_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = 
FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = get_params(model.params, from_head_prefix=model.base_model_prefix) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname) base_params = get_params(base_model.params) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_from_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = get_params(model.params) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, base_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: # save pt model pt_model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname, from_pt=True) base_param_from_head = get_params(head_model.params, from_head_prefix=head_model.base_model_prefix) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = get_params(model.params, from_head_prefix=model.base_model_prefix) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = get_params(base_model.params) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_bf16_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) model.params = model.to_bf16(model.params) base_params_from_head = get_params(model.params, from_head_prefix=model.base_model_prefix) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with 
tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = get_params(base_model.params) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_jit_compilation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def model_jitted(input_ids, attention_mask=None, **kwargs): return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs) with self.subTest("JIT Enabled"): jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = model_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = [ "input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["input_ids", "attention_mask"] self.assertListEqual(arg_names[:2], expected_arg_names) def test_naming_convention(self): for model_class in self.all_model_classes: model_class_name = model_class.__name__ module_class_name = ( model_class_name[:-5] + "Module" if model_class_name[-5:] == "Model" else model_class_name + "Module" ) bert_modeling_flax_module = __import__(model_class.__module__, fromlist=[module_class_name]) module_cls = getattr(bert_modeling_flax_module, module_class_name) self.assertIsNotNone(module_cls) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for 
model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="Model does not output attentions") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_length = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_length) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_length) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # Question Answering model returns start_logits and end_logits if model_class in get_values(FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING): correct_outlen += 1 # start_logits and end_logits instead of only 1 output self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) 
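# each attention map has shape (batch_size, num_heads, query_len, key_len); only the last three dims are checked below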
self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_load_with_mismatched_shapes(self): if not self.test_mismatched_shapes: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class not in get_values(FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING): continue with self.subTest(msg=f"Testing {model_class}"): with tempfile.TemporaryDirectory() as tmp_dir: model = model_class(config) model.save_pretrained(tmp_dir) # Fails when we don't set ignore_mismatched_sizes=True with self.assertRaises(ValueError): new_model = FlaxAutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42) with self.assertRaises(ValueError): new_model_without_prefix = FlaxAutoModel.from_pretrained(tmp_dir, vocab_size=10) logger = logging.get_logger("transformers.modeling_flax_utils") with CaptureLogger(logger) as cl: new_model = FlaxAutoModelForSequenceClassification.from_pretrained( tmp_dir, num_labels=42, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) logits = new_model(**inputs_dict)["logits"] self.assertEqual(logits.shape[1], 42) with CaptureLogger(logger) as cl: new_model_without_prefix = FlaxAutoModel.from_pretrained( tmp_dir, vocab_size=10, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) input_ids = ids_tensor((2, 8), 10) if self.is_encoder_decoder: new_model_without_prefix(input_ids, decoder_input_ids=input_ids) else: new_model_without_prefix(input_ids) def test_default_params_dtype(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # check if all params are still in float32 when dtype of computation is half-precision model = model_class(config, dtype=jnp.float16) types = jax.tree_util.tree_map(lambda x: x.dtype, model.params) types = flatten_dict(types) for name, type_ in types.items(): self.assertEquals(type_, jnp.float32, msg=f"param {name} is not initialized in fp32.") def test_to_bf16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # cast all params to bf16 params = model.to_bf16(model.params) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) # test if all params are in bf16 for name, type_ in types.items(): self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.") # test masking flat_params = flatten_dict(params) key = random.choice(list(flat_params.keys())) # choose a random param mask = {path: path != key for path in flat_params} # don't cast the key mask = unflatten_dict(mask) params = model.to_bf16(model.params, mask) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) # test if all params are in bf16 except key for name, type_ in types.items(): if name == key: self.assertEqual(type_, jnp.float32, msg=f"param {name} should be in fp32.") else: self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.") def test_to_fp16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # cast all params to fp16 params = model.to_fp16(model.params) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) # test if all params are in fp16 for name, type_ in types.items(): self.assertEqual(type_, jnp.float16, msg=f"param 
{name} is not in fp16.") # test masking flat_params = flatten_dict(params) key = random.choice(list(flat_params.keys())) # choose a random param mask = {path: path != key for path in flat_params} # don't cast the key mask = unflatten_dict(mask) params = model.to_fp16(model.params, mask) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) # test if all params are in fp16 except key for name, type_ in types.items(): if name == key: self.assertEqual(type_, jnp.float32, msg=f"param {name} should be in fp32.") else: self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.") def test_to_fp32(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # cast all params to fp16 and back to fp32 params = model.to_fp16(model.params) params = model.to_fp32(params) # test if all params are in fp32 types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.float32, msg=f"param {name} is not in fp32.") # test masking flat_params = flatten_dict(params) key = random.choice(list(flat_params.keys())) # choose a random param mask = {path: path != key for path in flat_params} # don't cast the key mask = unflatten_dict(mask) # cast to fp16 and back to fp32 with mask params = model.to_fp16(model.params) params = model.to_fp32(params, mask) # test if all params are in fp32 except key types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) for name, type_ in types.items(): if name == key: self.assertEqual(type_, jnp.float16, msg=f"param {name} should be in fp16.") else: self.assertEqual(type_, jnp.float32, msg=f"param {name} is not in fp32.") def test_save_load_in_fp16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # convert weights to fp16 and save params = model.to_fp16(model.params) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=params) # load the weights again and check if they are still in fp16 model = model_class.from_pretrained(tmpdirname) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, model.params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.") def test_save_load_in_bf16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # convert weights to bf16 and save params = model.to_bf16(model.params) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=params) # load the weights again and check if they are still in fp16 model = model_class.from_pretrained(tmpdirname) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, model.params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.") def test_model_main_input_name(self): for model_class in self.all_model_classes: model_signature = inspect.signature(getattr(model_class, "__call__")) # The main input is the name of the argument after `self` observed_main_input_name = list(model_signature.parameters.keys())[1] self.assertEqual(model_class.main_input_name, observed_main_input_name) def test_headmasking(self): if not self.test_head_masking: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict 
= True def _prepare_layer_head_mask(i, attention_heads, num_hidden_layers): if i == 0: return np.concatenate([np.zeros(1, dtype=jnp.int32), np.ones(attention_heads - 1, dtype=jnp.int32)]) if i == num_hidden_layers - 1: return np.concatenate([np.zeros(attention_heads - 1, dtype=jnp.int32), np.ones(1, dtype=jnp.int32)]) return np.ones(attention_heads, dtype=jnp.int32) for model_class in self.all_model_classes: model = model_class(config) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False inputs = self._prepare_for_class(inputs_dict, model_class).copy() # Prepare head mask inputs["head_mask"] = np.stack( [ _prepare_layer_head_mask(i, config.num_attention_heads, config.num_hidden_layers) for i in range(config.num_hidden_layers) ] ) outputs = model(**inputs) def _check_attentions_validity(attentions): # Remove NaN for t in attentions: # Check we don't have more than 25% nans (arbitrary) self.assertLess(np.isnan(t).sum(), t.size / 4) attentions = [np.where(np.isnan(t), 0.0, t) for t in attentions] self.assertAlmostEqual(attentions[0][..., 0, :, :].sum(), 0.0) self.assertNotEqual(attentions[0][..., -1, :, :].sum(), 0.0) if len(attentions) > 2: # encoder-decodere models have only 2 layers in each modules self.assertNotEqual(attentions[1][..., 0, :, :].sum(), 0.0) self.assertAlmostEqual(attentions[-1][..., -2, :, :].sum(), 0.0) self.assertNotEqual(attentions[-1][..., -1, :, :].sum(), 0.0) if model.config.is_encoder_decoder: raise NotImplementedError("The test has not been implemented for encoder-decoder models yet.") else: _check_attentions_validity(outputs.attentions) def test_no_automatic_init(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: model = model_class(config, _do_init=False) # Check that accesing parmas raises an ValueError when _do_init is False with self.assertRaises(ValueError): params = model.params # Check if we params can be properly initialized when calling init_weights params = model.init_weights(model.key, model.input_shape) self.assertIsInstance(params, FrozenDict) # Check if all required parmas are initialized keys = set(flatten_dict(unfreeze(params)).keys()) self.assertTrue(all(k in keys for k in model.required_params)) # Check if the shapes match flat_params = flatten_dict(unfreeze(params)) for k, v in flatten_dict(unfreeze(model.params_shape_tree)).items(): self.assertEqual( v.shape, flat_params[k].shape, "Shapes of {} do not match. Expecting {}, got {}.".format(k, v.shape, flat_params[k].shape), ) # Check that setting params raises an ValueError when _do_init is False with self.assertRaises(ValueError): model.params = params # Check if we can do a forward pass inputs_dict["output_hidden_states"] = True inputs = self._prepare_for_class(inputs_dict, model_class).copy() model(**inputs, params=params) def test_from_pretrained_with_no_automatic_init(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True def _assert_all_params_initialised(model, params): # Check if all required parmas are loaded keys = set(flatten_dict(unfreeze(params)).keys()) self.assertTrue(all(k in keys for k in model.required_params)) # Check if the shapes match flat_params = flatten_dict(unfreeze(params)) for k, v in flatten_dict(unfreeze(model.params_shape_tree)).items(): self.assertEqual( v.shape, flat_params[k].shape, "Shapes of {} do not match. 
Expecting {}, got {}.".format(k, v.shape, flat_params[k].shape), ) for model_class in self.all_model_classes: # init the model model = model_class(config) # save the model in the temporary directory # load the saved model with _do_init=False with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model, params = model_class.from_pretrained(tmpdirname, _do_init=False) # Check that accesing parmas raises an ValueError when _do_init is False with self.assertRaises(ValueError): params = model.params # Check if all required parmas are loaded _assert_all_params_initialised(model, params) # Check that setting params raises an ValueError when _do_init is False with self.assertRaises(ValueError): model.params = params # Check if init_weights initializes missing keys from from_pretrained flat_params = flatten_dict(unfreeze(params)) random_key = random.choice(list(flat_params.keys())) flat_params.pop(random_key) params = freeze(unflatten_dict(flat_params)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=params) model, params = model_class.from_pretrained(tmpdirname, _do_init=False) params = model.init_weights(model.key, model.input_shape, params=params) # Check if all required parmas are loaded _assert_all_params_initialised(model, params) def test_checkpoint_sharding_from_hub(self): model = FlaxBertModel.from_pretrained("ArthurZ/flax-tiny-random-bert-sharded") # the model above is the same as the model below, just a sharded version. ref_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") for p1, p2 in zip(flatten_dict(model.params).values(), flatten_dict(ref_model.params).values()): assert np.allclose(np.array(p1), np.array(p2)) def test_checkpoint_sharding_local(self): model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") with tempfile.TemporaryDirectory() as tmp_dir: # We use the same folder for various sizes to make sure a new save erases the old checkpoint. 
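# note: the sizes below mix decimal and binary suffixes; the size check further down treats "kiB" as 2**10 bytes and "kB" as 10**3 bytes when computing the per-shard budget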
for max_size in ["150kB", "150kiB", "200kB", "200kiB"]: model.save_pretrained(tmp_dir, max_shard_size=max_size) # Get each shard file and its size shard_to_size = {} for shard in os.listdir(tmp_dir): if shard.endswith(".msgpack"): shard_file = os.path.join(tmp_dir, shard) shard_to_size[shard_file] = os.path.getsize(shard_file) index_file = os.path.join(tmp_dir, FLAX_WEIGHTS_INDEX_NAME) # Check there is an index but no regular weight file self.assertTrue(os.path.isfile(index_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, FLAX_WEIGHTS_NAME))) # Check a file is bigger than max_size only when it has a single weight for shard_file, size in shard_to_size.items(): if max_size.endswith("kiB"): max_size_int = int(max_size[:-3]) * 2**10 else: max_size_int = int(max_size[:-2]) * 10**3 # Note: pickle adds some junk so the weight of the file can end up being slightly bigger than # the size asked for (since we count parameters) if size >= max_size_int + 50000: with open(shard_file, "rb") as state_f: state_file = from_bytes(FlaxBertModel, state_f.read()) self.assertEqual(len(state_file), 1) # Check the index and the shard files found match with open(index_file, "r", encoding="utf-8") as f: index = json.loads(f.read()) all_shards = set(index["weight_map"].values()) shards_found = {f for f in os.listdir(tmp_dir) if f.endswith(".msgpack")} self.assertSetEqual(all_shards, shards_found) # Finally, check the model can be reloaded new_model = FlaxBertModel.from_pretrained(tmp_dir) for p1, p2 in zip(flatten_dict(model.params).values(), flatten_dict(new_model.params).values()): self.assertTrue(np.allclose(np.array(p1), np.array(p2))) @is_pt_flax_cross_test def test_from_sharded_pt(self): model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded", from_pt=True) ref_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-fx-only") for key, ref_val in flatten_dict(ref_model.params).items(): val = flatten_dict(model.params)[key] assert np.allclose(np.array(val), np.array(ref_val)) def test_gradient_checkpointing(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # prepare inputs prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) remat_model = model_class(config) try: remat_model.enable_gradient_checkpointing() except NotImplementedError: continue outputs = model(**prepared_inputs_dict) remat_outputs = remat_model(**prepared_inputs_dict) # ensure that the dicts of outputs contain the same keys self.assertEqual(outputs.keys(), remat_outputs.keys()) outputs = outputs.to_tuple() remat_outputs = remat_outputs.to_tuple() # ensure that the outputs remain precisely equal for output, remat_output in zip(outputs, remat_outputs): self.assertTrue((output == remat_output).all()) @require_flax @is_staging_test class FlaxModelPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN set_access_token(TOKEN) HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-model-flax") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org") except HTTPError: pass def test_push_to_hub(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = FlaxBertModel(config) model.push_to_hub("test-model-flax", use_auth_token=self._token) new_model = 
FlaxBertModel.from_pretrained(f"{USER}/test-model-flax") base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # Reset repo delete_repo(token=self._token, repo_id="test-model-flax") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token) new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax") base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_push_to_hub_in_organization(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = FlaxBertModel(config) model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token) new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org") base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # Reset repo delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token ) new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org") base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def check_models_equal(model1, model2): models_are_equal = True flat_params_1 = flatten_dict(model1.params) flat_params_2 = flatten_dict(model2.params) for key in flat_params_1.keys(): if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4: models_are_equal = False return models_are_equal @require_flax class FlaxModelUtilsTest(unittest.TestCase): def test_model_from_pretrained_subfolder(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only") model = FlaxBertModel(config) subfolder = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(tmp_dir, subfolder)) with self.assertRaises(OSError): _ = FlaxBertModel.from_pretrained(tmp_dir) model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder) self.assertTrue(check_models_equal(model, model_loaded)) def test_model_from_pretrained_subfolder_sharded(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only") model = FlaxBertModel(config) subfolder = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB") with self.assertRaises(OSError): _ = FlaxBertModel.from_pretrained(tmp_dir) model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder) self.assertTrue(check_models_equal(model, model_loaded)) def test_model_from_pretrained_hub_subfolder(self): 
subfolder = "bert" model_id = "hf-internal-testing/tiny-random-bert-subfolder" with self.assertRaises(OSError): _ = FlaxBertModel.from_pretrained(model_id) model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder) self.assertIsNotNone(model) def test_model_from_pretrained_hub_subfolder_sharded(self): subfolder = "bert" model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder" with self.assertRaises(OSError): _ = FlaxBertModel.from_pretrained(model_id) model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder) self.assertIsNotNone(model)
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import inspect import json import random import tempfile import unittest from typing import List, Tuple import numpy as np from huggingface_hub import HfFolder, delete_repo, set_access_token from requests.exceptions import HTTPError import transformers from transformers import BertConfig, is_flax_available, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import ( TOKEN, USER, CaptureLogger, is_pt_flax_cross_test, is_staging_test, require_flax, torch_device, ) from transformers.utils import CONFIG_NAME, GENERATION_CONFIG_NAME, logging from transformers.utils.generic import ModelOutput if is_flax_available(): import os import jax import jax.numpy as jnp from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.serialization import from_bytes from flax.traverse_util import flatten_dict, unflatten_dict from transformers import ( FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, FLAX_MODEL_MAPPING, FlaxAutoModel, FlaxAutoModelForSequenceClassification, FlaxBertModel, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) from transformers.modeling_flax_utils import FLAX_WEIGHTS_INDEX_NAME, FLAX_WEIGHTS_NAME os.environ["XLA_PYTHON_CLIENT_MEM_FRACTION"] = "0.12" # assumed parallelism: 8 if is_torch_available(): import torch def _config_zero_init(config): configs_no_init = copy.deepcopy(config) for key in configs_no_init.__dict__.keys(): if "_range" in key or "_std" in key or "initializer_factor" in key: setattr(configs_no_init, key, 1e-10) return configs_no_init def ids_tensor(shape, vocab_size, rng=None): """Creates a random int32 tensor of the shape within the vocab size.""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.randint(0, vocab_size - 1)) output = np.array(values, dtype=jnp.int32).reshape(shape) return output def floats_tensor(shape, scale=1.0, rng=None, name=None): """Creates a random float32 tensor""" if rng is None: rng = random.Random() total_dims = 1 for dim in shape: total_dims *= dim values = [] for _ in range(total_dims): values.append(rng.random() * scale) return np.array(values, dtype=jnp.float32).reshape(shape) def random_attention_mask(shape, rng=None): attn_mask = ids_tensor(shape, vocab_size=2, rng=rng) # make sure that at least one token is attended to for each batch attn_mask[:, -1] = 1 return attn_mask def get_params(params, from_head_prefix=None): """Function extracts relevant parameters into flatten dict from model params, appends batch normalization statistics if present""" # If Both parameters and batch normalization statistics are present if "batch_stats" in params: # Extract only parameters for the specified head prefix (if specified) and add batch statistics if from_head_prefix is 
not None: extracted_params = flatten_dict(unfreeze(params["params"][from_head_prefix])) extracted_params.update(flatten_dict(params["batch_stats"][from_head_prefix])) else: extracted_params = flatten_dict(unfreeze(params["params"])) extracted_params.update(flatten_dict(params["batch_stats"])) # Only parameters are present else: if from_head_prefix is not None: extracted_params = flatten_dict(unfreeze(params[from_head_prefix])) else: extracted_params = flatten_dict(unfreeze(params)) return extracted_params @require_flax class FlaxModelTesterMixin: model_tester = None all_model_classes = () test_mismatched_shapes = True is_encoder_decoder = False test_head_masking = False has_attentions = True def _prepare_for_class(self, inputs_dict, model_class): inputs_dict = copy.deepcopy(inputs_dict) # hack for now until we have AutoModel classes if "ForMultipleChoice" in model_class.__name__: inputs_dict = { k: jnp.broadcast_to(v[:, None], (v.shape[0], self.model_tester.num_choices, v.shape[-1])) if isinstance(v, (jnp.ndarray, np.ndarray)) else v for k, v in inputs_dict.items() } return inputs_dict def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float): diff = np.abs((a - b)).max() self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).") def test_model_outputs_equivalence(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}): tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs) dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple() def recursive_check(tuple_object, dict_object): if isinstance(tuple_object, (List, Tuple)): for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object): recursive_check(tuple_iterable_value, dict_iterable_value) elif tuple_object is None: return else: self.assert_almost_equals(jnp.nan_to_num(tuple_object), jnp.nan_to_num(dict_object), 1e-5) recursive_check(tuple_output, dict_output) for model_class in self.all_model_classes: model = model_class(config) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs) tuple_inputs = self._prepare_for_class(inputs_dict, model_class) dict_inputs = self._prepare_for_class(inputs_dict, model_class) check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True}) # (Copied from tests.test_modeling_common.ModelTesterMixin.check_pt_flax_outputs) def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None): """ Args: model_class: The class of the model that is currently testing. For example, ..., etc. Currently unused, but it could make debugging easier and faster. names: A string, or a list of strings. These specify what fx_outputs/pt_outputs represent in the model outputs. Currently unused, but in the future, we could use this information to make the error message clearer by giving the name(s) of the output tensor(s) with large difference(s) between PT and Flax. """ self.assertEqual(type(name), str) if attributes is not None: self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`") # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`). 
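# the comparison recurses: a ModelOutput is unpacked into its non-None fields, tuples/lists are walked element-wise, and leaf arrays are compared on shape and on max absolute difference against `tol`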
if isinstance(fx_outputs, ModelOutput): self.assertTrue( isinstance(pt_outputs, ModelOutput), f"{name}: `pt_outputs` should an instance of `ModelOutput` when `fx_outputs` is", ) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys, f"{name}: Output keys differ between Flax and PyTorch") # convert to the case of `tuple` # appending each key to the current (string) `name` attributes = tuple([f"{name}.{k}" for k in fx_keys]) self.check_pt_flax_outputs( fx_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes ) # Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.) elif type(fx_outputs) in [tuple, list]: self.assertEqual( type(fx_outputs), type(pt_outputs), f"{name}: Output types differ between Flax and PyTorch" ) self.assertEqual( len(fx_outputs), len(pt_outputs), f"{name}: Output lengths differ between Flax and PyTorch" ) if attributes is not None: # case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`) self.assertEqual( len(attributes), len(fx_outputs), f"{name}: The tuple `attributes` should have the same length as `fx_outputs`", ) else: # case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `name` attributes = tuple([f"{name}_{idx}" for idx in range(len(fx_outputs))]) for fx_output, pt_output, attr in zip(fx_outputs, pt_outputs, attributes): self.check_pt_flax_outputs(fx_output, pt_output, model_class, tol=tol, name=attr) elif isinstance(fx_outputs, jnp.ndarray): self.assertTrue( isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should a tensor when `fx_outputs` is" ) # Using `np.asarray` gives `ValueError: assignment destination is read-only` at the line `fx_outputs[fx_nans] = 0`. fx_outputs = np.array(fx_outputs) pt_outputs = pt_outputs.detach().to("cpu").numpy() self.assertEqual( fx_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between Flax and PyTorch" ) # deal with NumPy's scalars to make replacing nan values by 0 work. if np.isscalar(fx_outputs): fx_outputs = np.array([fx_outputs]) pt_outputs = np.array([pt_outputs]) fx_nans = np.isnan(fx_outputs) pt_nans = np.isnan(pt_outputs) pt_outputs[fx_nans] = 0 fx_outputs[fx_nans] = 0 pt_outputs[pt_nans] = 0 fx_outputs[pt_nans] = 0 max_diff = np.amax(np.abs(fx_outputs - pt_outputs)) self.assertLessEqual( max_diff, tol, f"{name}: Difference between PyTorch and Flax is {max_diff} (>= {tol})." ) else: raise ValueError( "`fx_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `jnp.ndarray`. Got" f" {type(fx_outputs)} instead." ) @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): # It might be better to put this inside the for loop below (because we modify the config there). # But logically, it is fine. 
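# outline: build the PyTorch twin of each Flax class, port its weights with convert_pytorch_state_dict_to_flax, run both on identical inputs and compare via check_pt_flax_outputs, then round-trip through save_pretrained / from_pretrained(..., from_pt=True)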
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions # prepare inputs prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist(), device=torch_device) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. pt_model.config.use_cache = False fx_model = model_class(config, dtype=jnp.float32) fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**prepared_inputs_dict) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True) fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict) fx_keys = tuple([k for k, v in fx_outputs_loaded.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs_loaded, pt_outputs, model_class) @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions # prepare inputs prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) pt_inputs = {k: torch.tensor(v.tolist(), device=torch_device) for k, v in prepared_inputs_dict.items()} # load corresponding PyTorch class pt_model_class_name = model_class.__name__[4:] # Skip the "Flax" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) pt_model = pt_model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. 
pt_model.config.use_cache = False fx_model = model_class(config, dtype=jnp.float32) pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) # make sure weights are tied in PyTorch pt_model.tie_weights() # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**prepared_inputs_dict) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True) # send pytorch model to the correct device pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs_loaded.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs_loaded, model_class) def test_from_pretrained_save_pretrained(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): model = model_class(config) prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) outputs = model(**prepared_inputs_dict).to_tuple() # verify that normal save_pretrained works as expected with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) # the config file (and the generation config file, if it can generate) should be saved self.assertTrue(os.path.exists(os.path.join(tmpdirname, CONFIG_NAME))) self.assertEqual( model.can_generate(), os.path.exists(os.path.join(tmpdirname, GENERATION_CONFIG_NAME)) ) model_loaded = model_class.from_pretrained(tmpdirname) outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple() for output_loaded, output in zip(outputs_loaded, outputs): self.assert_almost_equals(output_loaded, output, 1e-3) # verify that save_pretrained for distributed training # with `params=params` works as expected with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=model.params) model_loaded = model_class.from_pretrained(tmpdirname) outputs_loaded = model_loaded(**prepared_inputs_dict).to_tuple() for output_loaded, output in zip(outputs_loaded, outputs): self.assert_almost_equals(output_loaded, output, 1e-3) def test_save_load_from_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = get_params(model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname) base_param_from_head = get_params(head_model.params, from_head_prefix=head_model.base_model_prefix) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_save_load_to_base(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = 
FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = get_params(model.params, from_head_prefix=model.base_model_prefix) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname) base_params = get_params(base_model.params) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_from_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = base_class(config) base_params = get_params(model.params) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, base_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: # save pt model pt_model.save_pretrained(tmpdirname) head_model = model_class.from_pretrained(tmpdirname, from_pt=True) base_param_from_head = get_params(head_model.params, from_head_prefix=head_model.base_model_prefix) for key in base_param_from_head.keys(): max_diff = (base_params[key] - base_param_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) base_params_from_head = get_params(model.params, from_head_prefix=model.base_model_prefix) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = get_params(base_model.params) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") @is_pt_flax_cross_test def test_save_load_bf16_to_base_pt(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() base_class = FLAX_MODEL_MAPPING[config.__class__] for model_class in self.all_model_classes: if model_class == base_class: continue model = model_class(config) model.params = model.to_bf16(model.params) base_params_from_head = get_params(model.params, from_head_prefix=model.base_model_prefix) # convert Flax model to PyTorch model pt_model_class = getattr(transformers, model_class.__name__[4:]) # Skip the "Flax" at the beginning pt_model = pt_model_class(config).eval() pt_model = load_flax_weights_in_pytorch_model(pt_model, model.params) # check that all base model weights are loaded correctly with 
tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) base_model = base_class.from_pretrained(tmpdirname, from_pt=True) base_params = get_params(base_model.params) for key in base_params_from_head.keys(): max_diff = (base_params[key] - base_params_from_head[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_jit_compilation(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__): prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) @jax.jit def model_jitted(input_ids, attention_mask=None, **kwargs): return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs) with self.subTest("JIT Enabled"): jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple() with self.subTest("JIT Disabled"): with jax.disable_jit(): outputs = model_jitted(**prepared_inputs_dict).to_tuple() self.assertEqual(len(outputs), len(jitted_outputs)) for jitted_output, output in zip(jitted_outputs, outputs): self.assertEqual(jitted_output.shape, output.shape) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.__call__) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] if model.config.is_encoder_decoder: expected_arg_names = [ "input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) else: expected_arg_names = ["input_ids", "attention_mask"] self.assertListEqual(arg_names[:2], expected_arg_names) def test_naming_convention(self): for model_class in self.all_model_classes: model_class_name = model_class.__name__ module_class_name = ( model_class_name[:-5] + "Module" if model_class_name[-5:] == "Model" else model_class_name + "Module" ) bert_modeling_flax_module = __import__(model_class.__module__, fromlist=[module_class_name]) module_cls = getattr(bert_modeling_flax_module, module_class_name) self.assertIsNotNone(module_cls) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_len) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for 
model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_attention_outputs(self): if not self.has_attentions: self.skipTest(reason="Model does not output attentions") config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_length = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", seq_length) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_length) decoder_key_length = getattr(self.model_tester, "decoder_key_length", decoder_seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) out_len = len(outputs) if self.is_encoder_decoder: correct_outlen = 5 # Question Answering model returns start_logits and end_logits if model_class in get_values(FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING): correct_outlen += 1 # start_logits and end_logits instead of only 1 output self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, (list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types elif self.is_encoder_decoder: added_hidden_states = 2 else: added_hidden_states = 1 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) 
self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], ) def test_load_with_mismatched_shapes(self): if not self.test_mismatched_shapes: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: if model_class not in get_values(FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING): continue with self.subTest(msg=f"Testing {model_class}"): with tempfile.TemporaryDirectory() as tmp_dir: model = model_class(config) model.save_pretrained(tmp_dir) # Fails when we don't set ignore_mismatched_sizes=True with self.assertRaises(ValueError): new_model = FlaxAutoModelForSequenceClassification.from_pretrained(tmp_dir, num_labels=42) with self.assertRaises(ValueError): new_model_without_prefix = FlaxAutoModel.from_pretrained(tmp_dir, vocab_size=10) logger = logging.get_logger("transformers.modeling_flax_utils") with CaptureLogger(logger) as cl: new_model = FlaxAutoModelForSequenceClassification.from_pretrained( tmp_dir, num_labels=42, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) logits = new_model(**inputs_dict)["logits"] self.assertEqual(logits.shape[1], 42) with CaptureLogger(logger) as cl: new_model_without_prefix = FlaxAutoModel.from_pretrained( tmp_dir, vocab_size=10, ignore_mismatched_sizes=True ) self.assertIn("the shapes did not match", cl.out) input_ids = ids_tensor((2, 8), 10) if self.is_encoder_decoder: new_model_without_prefix(input_ids, decoder_input_ids=input_ids) else: new_model_without_prefix(input_ids) def test_default_params_dtype(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # check if all params are still in float32 when dtype of computation is half-precision model = model_class(config, dtype=jnp.float16) types = jax.tree_util.tree_map(lambda x: x.dtype, model.params) types = flatten_dict(types) for name, type_ in types.items(): self.assertEquals(type_, jnp.float32, msg=f"param {name} is not initialized in fp32.") def test_to_bf16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # cast all params to bf16 params = model.to_bf16(model.params) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) # test if all params are in bf16 for name, type_ in types.items(): self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.") # test masking flat_params = flatten_dict(params) key = random.choice(list(flat_params.keys())) # choose a random param mask = {path: path != key for path in flat_params} # don't cast the key mask = unflatten_dict(mask) params = model.to_bf16(model.params, mask) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) # test if all params are in bf16 except key for name, type_ in types.items(): if name == key: self.assertEqual(type_, jnp.float32, msg=f"param {name} should be in fp32.") else: self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.") def test_to_fp16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # cast all params to fp16 params = model.to_fp16(model.params) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) # test if all params are in fp16 for name, type_ in types.items(): self.assertEqual(type_, jnp.float16, msg=f"param 
{name} is not in fp16.") # test masking flat_params = flatten_dict(params) key = random.choice(list(flat_params.keys())) # choose a random param mask = {path: path != key for path in flat_params} # don't cast the key mask = unflatten_dict(mask) params = model.to_fp16(model.params, mask) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) # test if all params are in fp16 except key for name, type_ in types.items(): if name == key: self.assertEqual(type_, jnp.float32, msg=f"param {name} should be in fp32.") else: self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.") def test_to_fp32(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # cast all params to fp16 and back to fp32 params = model.to_fp16(model.params) params = model.to_fp32(params) # test if all params are in fp32 types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.float32, msg=f"param {name} is not in fp32.") # test masking flat_params = flatten_dict(params) key = random.choice(list(flat_params.keys())) # choose a random param mask = {path: path != key for path in flat_params} # don't cast the key mask = unflatten_dict(mask) # cast to fp16 and back to fp32 with mask params = model.to_fp16(model.params) params = model.to_fp32(params, mask) # test if all params are in fp32 except key types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, params)) for name, type_ in types.items(): if name == key: self.assertEqual(type_, jnp.float16, msg=f"param {name} should be in fp16.") else: self.assertEqual(type_, jnp.float32, msg=f"param {name} is not in fp32.") def test_save_load_in_fp16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # convert weights to fp16 and save params = model.to_fp16(model.params) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=params) # load the weights again and check if they are still in fp16 model = model_class.from_pretrained(tmpdirname) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, model.params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.float16, msg=f"param {name} is not in fp16.") def test_save_load_in_bf16(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) # convert weights to bf16 and save params = model.to_bf16(model.params) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=params) # load the weights again and check if they are still in fp16 model = model_class.from_pretrained(tmpdirname) types = flatten_dict(jax.tree_util.tree_map(lambda x: x.dtype, model.params)) for name, type_ in types.items(): self.assertEqual(type_, jnp.bfloat16, msg=f"param {name} is not in bf16.") def test_model_main_input_name(self): for model_class in self.all_model_classes: model_signature = inspect.signature(getattr(model_class, "__call__")) # The main input is the name of the argument after `self` observed_main_input_name = list(model_signature.parameters.keys())[1] self.assertEqual(model_class.main_input_name, observed_main_input_name) def test_headmasking(self): if not self.test_head_masking: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict 
= True def _prepare_layer_head_mask(i, attention_heads, num_hidden_layers): if i == 0: return np.concatenate([np.zeros(1, dtype=jnp.int32), np.ones(attention_heads - 1, dtype=jnp.int32)]) if i == num_hidden_layers - 1: return np.concatenate([np.zeros(attention_heads - 1, dtype=jnp.int32), np.ones(1, dtype=jnp.int32)]) return np.ones(attention_heads, dtype=jnp.int32) for model_class in self.all_model_classes: model = model_class(config) inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False inputs = self._prepare_for_class(inputs_dict, model_class).copy() # Prepare head mask inputs["head_mask"] = np.stack( [ _prepare_layer_head_mask(i, config.num_attention_heads, config.num_hidden_layers) for i in range(config.num_hidden_layers) ] ) outputs = model(**inputs) def _check_attentions_validity(attentions): # Remove NaN for t in attentions: # Check we don't have more than 25% nans (arbitrary) self.assertLess(np.isnan(t).sum(), t.size / 4) attentions = [np.where(np.isnan(t), 0.0, t) for t in attentions] self.assertAlmostEqual(attentions[0][..., 0, :, :].sum(), 0.0) self.assertNotEqual(attentions[0][..., -1, :, :].sum(), 0.0) if len(attentions) > 2: # encoder-decodere models have only 2 layers in each modules self.assertNotEqual(attentions[1][..., 0, :, :].sum(), 0.0) self.assertAlmostEqual(attentions[-1][..., -2, :, :].sum(), 0.0) self.assertNotEqual(attentions[-1][..., -1, :, :].sum(), 0.0) if model.config.is_encoder_decoder: raise NotImplementedError("The test has not been implemented for encoder-decoder models yet.") else: _check_attentions_validity(outputs.attentions) def test_no_automatic_init(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: model = model_class(config, _do_init=False) # Check that accesing parmas raises an ValueError when _do_init is False with self.assertRaises(ValueError): params = model.params # Check if we params can be properly initialized when calling init_weights params = model.init_weights(model.key, model.input_shape) self.assertIsInstance(params, FrozenDict) # Check if all required parmas are initialized keys = set(flatten_dict(unfreeze(params)).keys()) self.assertTrue(all(k in keys for k in model.required_params)) # Check if the shapes match flat_params = flatten_dict(unfreeze(params)) for k, v in flatten_dict(unfreeze(model.params_shape_tree)).items(): self.assertEqual( v.shape, flat_params[k].shape, "Shapes of {} do not match. Expecting {}, got {}.".format(k, v.shape, flat_params[k].shape), ) # Check that setting params raises an ValueError when _do_init is False with self.assertRaises(ValueError): model.params = params # Check if we can do a forward pass inputs_dict["output_hidden_states"] = True inputs = self._prepare_for_class(inputs_dict, model_class).copy() model(**inputs, params=params) def test_from_pretrained_with_no_automatic_init(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True def _assert_all_params_initialised(model, params): # Check if all required parmas are loaded keys = set(flatten_dict(unfreeze(params)).keys()) self.assertTrue(all(k in keys for k in model.required_params)) # Check if the shapes match flat_params = flatten_dict(unfreeze(params)) for k, v in flatten_dict(unfreeze(model.params_shape_tree)).items(): self.assertEqual( v.shape, flat_params[k].shape, "Shapes of {} do not match. 
Expecting {}, got {}.".format(k, v.shape, flat_params[k].shape), ) for model_class in self.all_model_classes: # init the model model = model_class(config) # save the model in the temporary directory # load the saved model with _do_init=False with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model, params = model_class.from_pretrained(tmpdirname, _do_init=False) # Check that accesing parmas raises an ValueError when _do_init is False with self.assertRaises(ValueError): params = model.params # Check if all required parmas are loaded _assert_all_params_initialised(model, params) # Check that setting params raises an ValueError when _do_init is False with self.assertRaises(ValueError): model.params = params # Check if init_weights initializes missing keys from from_pretrained flat_params = flatten_dict(unfreeze(params)) random_key = random.choice(list(flat_params.keys())) flat_params.pop(random_key) params = freeze(unflatten_dict(flat_params)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, params=params) model, params = model_class.from_pretrained(tmpdirname, _do_init=False) params = model.init_weights(model.key, model.input_shape, params=params) # Check if all required parmas are loaded _assert_all_params_initialised(model, params) def test_checkpoint_sharding_from_hub(self): model = FlaxBertModel.from_pretrained("ArthurZ/flax-tiny-random-bert-sharded") # the model above is the same as the model below, just a sharded version. ref_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") for p1, p2 in zip(flatten_dict(model.params).values(), flatten_dict(ref_model.params).values()): assert np.allclose(np.array(p1), np.array(p2)) def test_checkpoint_sharding_local(self): model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-bert-flax-only") with tempfile.TemporaryDirectory() as tmp_dir: # We use the same folder for various sizes to make sure a new save erases the old checkpoint. 
for max_size in ["150kB", "150kiB", "200kB", "200kiB"]: model.save_pretrained(tmp_dir, max_shard_size=max_size) # Get each shard file and its size shard_to_size = {} for shard in os.listdir(tmp_dir): if shard.endswith(".msgpack"): shard_file = os.path.join(tmp_dir, shard) shard_to_size[shard_file] = os.path.getsize(shard_file) index_file = os.path.join(tmp_dir, FLAX_WEIGHTS_INDEX_NAME) # Check there is an index but no regular weight file self.assertTrue(os.path.isfile(index_file)) self.assertFalse(os.path.isfile(os.path.join(tmp_dir, FLAX_WEIGHTS_NAME))) # Check a file is bigger than max_size only when it has a single weight for shard_file, size in shard_to_size.items(): if max_size.endswith("kiB"): max_size_int = int(max_size[:-3]) * 2**10 else: max_size_int = int(max_size[:-2]) * 10**3 # Note: pickle adds some junk so the weight of the file can end up being slightly bigger than # the size asked for (since we count parameters) if size >= max_size_int + 50000: with open(shard_file, "rb") as state_f: state_file = from_bytes(FlaxBertModel, state_f.read()) self.assertEqual(len(state_file), 1) # Check the index and the shard files found match with open(index_file, "r", encoding="utf-8") as f: index = json.loads(f.read()) all_shards = set(index["weight_map"].values()) shards_found = {f for f in os.listdir(tmp_dir) if f.endswith(".msgpack")} self.assertSetEqual(all_shards, shards_found) # Finally, check the model can be reloaded new_model = FlaxBertModel.from_pretrained(tmp_dir) for p1, p2 in zip(flatten_dict(model.params).values(), flatten_dict(new_model.params).values()): self.assertTrue(np.allclose(np.array(p1), np.array(p2))) @is_pt_flax_cross_test def test_from_sharded_pt(self): model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded", from_pt=True) ref_model = FlaxBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-fx-only") for key, ref_val in flatten_dict(ref_model.params).items(): val = flatten_dict(model.params)[key] assert np.allclose(np.array(val), np.array(ref_val)) def test_gradient_checkpointing(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # prepare inputs prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) remat_model = model_class(config) try: remat_model.enable_gradient_checkpointing() except NotImplementedError: continue outputs = model(**prepared_inputs_dict) remat_outputs = remat_model(**prepared_inputs_dict) # ensure that the dicts of outputs contain the same keys self.assertEqual(outputs.keys(), remat_outputs.keys()) outputs = outputs.to_tuple() remat_outputs = remat_outputs.to_tuple() # ensure that the outputs remain precisely equal for output, remat_output in zip(outputs, remat_outputs): self.assertTrue((output == remat_output).all()) @require_flax @is_staging_test class FlaxModelPushToHubTester(unittest.TestCase): @classmethod def setUpClass(cls): cls._token = TOKEN set_access_token(TOKEN) HfFolder.save_token(TOKEN) @classmethod def tearDownClass(cls): try: delete_repo(token=cls._token, repo_id="test-model-flax") except HTTPError: pass try: delete_repo(token=cls._token, repo_id="valid_org/test-model-flax-org") except HTTPError: pass def test_push_to_hub(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = FlaxBertModel(config) model.push_to_hub("test-model-flax", use_auth_token=self._token) new_model = 
FlaxBertModel.from_pretrained(f"{USER}/test-model-flax") base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # Reset repo delete_repo(token=self._token, repo_id="test-model-flax") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(tmp_dir, repo_id="test-model-flax", push_to_hub=True, use_auth_token=self._token) new_model = FlaxBertModel.from_pretrained(f"{USER}/test-model-flax") base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def test_push_to_hub_in_organization(self): config = BertConfig( vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37 ) model = FlaxBertModel(config) model.push_to_hub("valid_org/test-model-flax-org", use_auth_token=self._token) new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org") base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") # Reset repo delete_repo(token=self._token, repo_id="valid_org/test-model-flax-org") # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( tmp_dir, repo_id="valid_org/test-model-flax-org", push_to_hub=True, use_auth_token=self._token ) new_model = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org") base_params = flatten_dict(unfreeze(model.params)) new_params = flatten_dict(unfreeze(new_model.params)) for key in base_params.keys(): max_diff = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(max_diff, 1e-3, msg=f"{key} not identical") def check_models_equal(model1, model2): models_are_equal = True flat_params_1 = flatten_dict(model1.params) flat_params_2 = flatten_dict(model2.params) for key in flat_params_1.keys(): if np.sum(np.abs(flat_params_1[key] - flat_params_2[key])) > 1e-4: models_are_equal = False return models_are_equal @require_flax class FlaxModelUtilsTest(unittest.TestCase): def test_model_from_pretrained_subfolder(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only") model = FlaxBertModel(config) subfolder = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(tmp_dir, subfolder)) with self.assertRaises(OSError): _ = FlaxBertModel.from_pretrained(tmp_dir) model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder) self.assertTrue(check_models_equal(model, model_loaded)) def test_model_from_pretrained_subfolder_sharded(self): config = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only") model = FlaxBertModel(config) subfolder = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(tmp_dir, subfolder), max_shard_size="10KB") with self.assertRaises(OSError): _ = FlaxBertModel.from_pretrained(tmp_dir) model_loaded = FlaxBertModel.from_pretrained(tmp_dir, subfolder=subfolder) self.assertTrue(check_models_equal(model, model_loaded)) def test_model_from_pretrained_hub_subfolder(self): 
subfolder = "bert" model_id = "hf-internal-testing/tiny-random-bert-subfolder" with self.assertRaises(OSError): _ = FlaxBertModel.from_pretrained(model_id) model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder) self.assertIsNotNone(model) def test_model_from_pretrained_hub_subfolder_sharded(self): subfolder = "bert" model_id = "hf-internal-testing/tiny-random-bert-sharded-subfolder" with self.assertRaises(OSError): _ = FlaxBertModel.from_pretrained(model_id) model = FlaxBertModel.from_pretrained(model_id, subfolder=subfolder) self.assertIsNotNone(model)
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/convnext/feature_extraction_convnext.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature extractor class for ConvNeXT.""" import warnings from ...utils import logging from .image_processing_convnext import ConvNextImageProcessor logger = logging.get_logger(__name__) class ConvNextFeatureExtractor(ConvNextImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( "The class ConvNextFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use ConvNextImageProcessor instead.", FutureWarning, ) super().__init__(*args, **kwargs)
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature extractor class for ConvNeXT.""" import warnings from ...utils import logging from .image_processing_convnext import ConvNextImageProcessor logger = logging.get_logger(__name__) class ConvNextFeatureExtractor(ConvNextImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( "The class ConvNextFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use ConvNextImageProcessor instead.", FutureWarning, ) super().__init__(*args, **kwargs)
-1
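For readers scanning these records, the change the PR description refers to is the standard floor-division replacement in PyTorch. The snippet below is a minimal illustrative sketch of that pattern only; the tensor name, values, and block size are made up for the example and are not taken from the PR diff.

```python
import torch

lengths = torch.tensor([10, 17, 23])  # hypothetical example tensor
block_size = 4

# Deprecated style: floor division on a tensor emits the __floordiv__ warning
# num_blocks = lengths // block_size

# Replacement: explicit torch.div with floor rounding gives the same integer result
num_blocks = torch.div(lengths, block_size, rounding_mode="floor")
print(num_blocks)  # tensor([2, 4, 5])
```

`torch.div(..., rounding_mode="floor")` reproduces integer floor division without triggering the deprecation warning.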
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/qdqbert/__init__.py
# Copyright 2021 NVIDIA Corporation and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {"configuration_qdqbert": ["QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "QDQBertConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_qdqbert"] = [ "QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "QDQBertForMaskedLM", "QDQBertForMultipleChoice", "QDQBertForNextSentencePrediction", "QDQBertForQuestionAnswering", "QDQBertForSequenceClassification", "QDQBertForTokenClassification", "QDQBertLayer", "QDQBertLMHeadModel", "QDQBertModel", "QDQBertPreTrainedModel", "load_tf_weights_in_qdqbert", ] if TYPE_CHECKING: from .configuration_qdqbert import QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, QDQBertConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_qdqbert import ( QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST, QDQBertForMaskedLM, QDQBertForMultipleChoice, QDQBertForNextSentencePrediction, QDQBertForQuestionAnswering, QDQBertForSequenceClassification, QDQBertForTokenClassification, QDQBertLayer, QDQBertLMHeadModel, QDQBertModel, QDQBertPreTrainedModel, load_tf_weights_in_qdqbert, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Copyright 2021 NVIDIA Corporation and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = {"configuration_qdqbert": ["QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "QDQBertConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_qdqbert"] = [ "QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "QDQBertForMaskedLM", "QDQBertForMultipleChoice", "QDQBertForNextSentencePrediction", "QDQBertForQuestionAnswering", "QDQBertForSequenceClassification", "QDQBertForTokenClassification", "QDQBertLayer", "QDQBertLMHeadModel", "QDQBertModel", "QDQBertPreTrainedModel", "load_tf_weights_in_qdqbert", ] if TYPE_CHECKING: from .configuration_qdqbert import QDQBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, QDQBertConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_qdqbert import ( QDQBERT_PRETRAINED_MODEL_ARCHIVE_LIST, QDQBertForMaskedLM, QDQBertForMultipleChoice, QDQBertForNextSentencePrediction, QDQBertForQuestionAnswering, QDQBertForSequenceClassification, QDQBertForTokenClassification, QDQBertLayer, QDQBertLMHeadModel, QDQBertModel, QDQBertPreTrainedModel, load_tf_weights_in_qdqbert, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/luke/__init__.py
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = { "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"], "tokenization_luke": ["LukeTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_luke"] = [ "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST", "LukeForEntityClassification", "LukeForEntityPairClassification", "LukeForEntitySpanClassification", "LukeForMultipleChoice", "LukeForQuestionAnswering", "LukeForSequenceClassification", "LukeForTokenClassification", "LukeForMaskedLM", "LukeModel", "LukePreTrainedModel", ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available _import_structure = { "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"], "tokenization_luke": ["LukeTokenizer"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_luke"] = [ "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST", "LukeForEntityClassification", "LukeForEntityPairClassification", "LukeForEntitySpanClassification", "LukeForMultipleChoice", "LukeForQuestionAnswering", "LukeForSequenceClassification", "LukeForTokenClassification", "LukeForMaskedLM", "LukeModel", "LukePreTrainedModel", ] if TYPE_CHECKING: from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig from .tokenization_luke import LukeTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_luke import ( LUKE_PRETRAINED_MODEL_ARCHIVE_LIST, LukeForEntityClassification, LukeForEntityPairClassification, LukeForEntitySpanClassification, LukeForMaskedLM, LukeForMultipleChoice, LukeForQuestionAnswering, LukeForSequenceClassification, LukeForTokenClassification, LukeModel, LukePreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/yolos/configuration_yolos.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ YOLOS model configuration""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = { "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json", # See all YOLOS models at https://huggingface.co/models?filter=yolos } class YolosConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`YolosModel`]. It is used to instantiate a YOLOS model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the YOLOS [hustvl/yolos-base](https://huggingface.co/hustvl/yolos-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. image_size (`List[int]`, *optional*, defaults to `[512, 864]`): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to `16`): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to `3`): The number of input channels. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. num_detection_tokens (`int`, *optional*, defaults to `100`): The number of detection tokens. 
use_mid_position_embeddings (`bool`, *optional*, defaults to `True`): Whether to use the mid-layer position encodings. auxiliary_loss (`bool`, *optional*, defaults to `False`): Whether auxiliary decoding losses (loss at each decoder layer) are to be used. class_cost (`float`, *optional*, defaults to 1): Relative weight of the classification error in the Hungarian matching cost. bbox_cost (`float`, *optional*, defaults to 5): Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost. giou_cost (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost. bbox_loss_coefficient (`float`, *optional*, defaults to 5): Relative weight of the L1 bounding box loss in the object detection loss. giou_loss_coefficient (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss in the object detection loss. eos_coefficient (`float`, *optional*, defaults to 0.1): Relative classification weight of the 'no-object' class in the object detection loss. Example: ```python >>> from transformers import YolosConfig, YolosModel >>> # Initializing a YOLOS hustvl/yolos-base style configuration >>> configuration = YolosConfig() >>> # Initializing a model (with random weights) from the hustvl/yolos-base style configuration >>> model = YolosModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "yolos" def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.qkv_bias = qkv_bias self.num_detection_tokens = num_detection_tokens self.use_mid_position_embeddings = use_mid_position_embeddings self.auxiliary_loss = auxiliary_loss # Hungarian matcher self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost # Loss coefficients self.bbox_loss_coefficient = bbox_loss_coefficient self.giou_loss_coefficient = giou_loss_coefficient self.eos_coefficient = eos_coefficient class YolosOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse("1.11") @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def atol_for_validation(self) -> float: return 1e-4 @property def default_onnx_opset(self) -> int: return 12
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ YOLOS model configuration""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = { "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json", # See all YOLOS models at https://huggingface.co/models?filter=yolos } class YolosConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`YolosModel`]. It is used to instantiate a YOLOS model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the YOLOS [hustvl/yolos-base](https://huggingface.co/hustvl/yolos-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. image_size (`List[int]`, *optional*, defaults to `[512, 864]`): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to `16`): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to `3`): The number of input channels. qkv_bias (`bool`, *optional*, defaults to `True`): Whether to add a bias to the queries, keys and values. num_detection_tokens (`int`, *optional*, defaults to `100`): The number of detection tokens. 
use_mid_position_embeddings (`bool`, *optional*, defaults to `True`): Whether to use the mid-layer position encodings. auxiliary_loss (`bool`, *optional*, defaults to `False`): Whether auxiliary decoding losses (loss at each decoder layer) are to be used. class_cost (`float`, *optional*, defaults to 1): Relative weight of the classification error in the Hungarian matching cost. bbox_cost (`float`, *optional*, defaults to 5): Relative weight of the L1 error of the bounding box coordinates in the Hungarian matching cost. giou_cost (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss of the bounding box in the Hungarian matching cost. bbox_loss_coefficient (`float`, *optional*, defaults to 5): Relative weight of the L1 bounding box loss in the object detection loss. giou_loss_coefficient (`float`, *optional*, defaults to 2): Relative weight of the generalized IoU loss in the object detection loss. eos_coefficient (`float`, *optional*, defaults to 0.1): Relative classification weight of the 'no-object' class in the object detection loss. Example: ```python >>> from transformers import YolosConfig, YolosModel >>> # Initializing a YOLOS hustvl/yolos-base style configuration >>> configuration = YolosConfig() >>> # Initializing a model (with random weights) from the hustvl/yolos-base style configuration >>> model = YolosModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "yolos" def __init__( self, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-12, image_size=[512, 864], patch_size=16, num_channels=3, qkv_bias=True, num_detection_tokens=100, use_mid_position_embeddings=True, auxiliary_loss=False, class_cost=1, bbox_cost=5, giou_cost=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, **kwargs, ): super().__init__(**kwargs) self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.qkv_bias = qkv_bias self.num_detection_tokens = num_detection_tokens self.use_mid_position_embeddings = use_mid_position_embeddings self.auxiliary_loss = auxiliary_loss # Hungarian matcher self.class_cost = class_cost self.bbox_cost = bbox_cost self.giou_cost = giou_cost # Loss coefficients self.bbox_loss_coefficient = bbox_loss_coefficient self.giou_loss_coefficient = giou_loss_coefficient self.eos_coefficient = eos_coefficient class YolosOnnxConfig(OnnxConfig): torch_onnx_minimum_version = version.parse("1.11") @property def inputs(self) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def atol_for_validation(self) -> float: return 1e-4 @property def default_onnx_opset(self) -> int: return 12
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/gpt_neox_japanese/__init__.py
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/chinese_clip/image_processing_chinese_clip.py
# coding=utf-8 # Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Chinese-CLIP.""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import logging from ...utils.import_utils import is_vision_available logger = logging.get_logger(__name__) if is_vision_available(): import PIL class ChineseCLIPImageProcessor(BaseImageProcessor): r""" Constructs a Chinese-CLIP image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`): Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_center_crop (`bool`, *optional*, defaults to `True`): Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the `preprocess` method. crop_size (`Dict[str, int]` *optional*, defaults to 224): Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` method. do_normalize: Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Image standard deviation. do_convert_rgb (`bool`, *optional*, defaults to `True`): Standard deviation to use if normalizing the image. 
This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. """ model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"shortest_edge": 224} size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224} crop_size = get_size_dict(crop_size) self.do_resize = do_resize self.size = size self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD self.do_convert_rgb = do_convert_rgb def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resiizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ size = get_size_dict(size, default_to_square=False) output_size = get_resize_output_image_size( image, size=(size["height"], size["width"]), default_to_square=False ) return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs) def center_crop( self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Center crop an image. If the image is too small to be cropped to the size given, it will be padded (so the returned result will always be of size `size`). Args: image (`np.ndarray`): Image to center crop. size (`Dict[str, int]`): Size of the output image in the form of a dictionary with keys `height` and `width`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ size = get_size_dict(size) return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs) def rescale( self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ): """ Rescale an image by a scale factor. image = image * scale. Args: image (`np.ndarray`): Image to rescale. scale (`int` or `float`): Scale to apply to the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. 
""" return rescale(image, scale=scale, data_format=data_format, **kwargs) def normalize( self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Normalize an image. image = (image - image_mean) / image_std. Args: image (`np.ndarray`): Image to normalize. image_mean (`float` or `List[float]`): Image mean. image_std (`float` or `List[float]`): Image standard deviation. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs) def preprocess( self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the center crop. Only has an effect if `do_center_crop` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. 
Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: defaults to the channel dimension format of the input image. """ do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size) do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True.") if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") # PIL RGBA images are converted to RGB if do_convert_rgb: images = [convert_to_rgb(image) for image in images] # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_resize: images = [self.resize(image=image, size=size, resample=resample) for image in images] if do_center_crop: images = [self.center_crop(image=image, size=crop_size) for image in images] if do_rescale: images = [self.rescale(image=image, scale=rescale_factor) for image in images] if do_normalize: images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images] images = [to_channel_dimension_format(image, data_format) for image in images] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors)
# coding=utf-8 # Copyright 2022 The OFA-Sys Team Authors and The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for Chinese-CLIP.""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import logging from ...utils.import_utils import is_vision_available logger = logging.get_logger(__name__) if is_vision_available(): import PIL class ChineseCLIPImageProcessor(BaseImageProcessor): r""" Constructs a Chinese-CLIP image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in the `preprocess` method. size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`): Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess` method. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method. do_center_crop (`bool`, *optional*, defaults to `True`): Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the `preprocess` method. crop_size (`Dict[str, int]` *optional*, defaults to 224): Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess` method. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess` method. do_normalize: Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Image standard deviation. do_convert_rgb (`bool`, *optional*, defaults to `True`): Standard deviation to use if normalizing the image. 
This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. """ model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PILImageResampling.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, do_rescale: bool = True, rescale_factor: Union[int, float] = 1 / 255, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = True, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"shortest_edge": 224} size = get_size_dict(size, default_to_square=False) crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224} crop_size = get_size_dict(crop_size) self.do_resize = do_resize self.size = size self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD self.do_convert_rgb = do_convert_rgb def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PILImageResampling.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the output image. resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`): Resampling filter to use when resiizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ size = get_size_dict(size, default_to_square=False) output_size = get_resize_output_image_size( image, size=(size["height"], size["width"]), default_to_square=False ) return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs) def center_crop( self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Center crop an image. If the image is too small to be cropped to the size given, it will be padded (so the returned result will always be of size `size`). Args: image (`np.ndarray`): Image to center crop. size (`Dict[str, int]`): Size of the output image in the form of a dictionary with keys `height` and `width`. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ size = get_size_dict(size) return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs) def rescale( self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ): """ Rescale an image by a scale factor. image = image * scale. Args: image (`np.ndarray`): Image to rescale. scale (`int` or `float`): Scale to apply to the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. 
""" return rescale(image, scale=scale, data_format=data_format, **kwargs) def normalize( self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Normalize an image. image = (image - image_mean) / image_std. Args: image (`np.ndarray`): Image to normalize. image_mean (`float` or `List[float]`): Image mean. image_std (`float` or `List[float]`): Image standard deviation. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs) def preprocess( self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample: PILImageResampling = None, do_center_crop: bool = None, crop_size: int = None, do_rescale: bool = None, rescale_factor: float = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, do_convert_rgb: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: Optional[ChannelDimension] = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with the longest edge resized to keep the input aspect ratio. resample (`int`, *optional*, defaults to `self.resample`): Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the center crop. Only has an effect if `do_center_crop` is set to `True`. do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to `True`. do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`): Whether to convert the image to RGB. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - Unset: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. 
Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. - Unset: defaults to the channel dimension format of the input image. """ do_resize = do_resize if do_resize is not None else self.do_resize size = size if size is not None else self.size size = get_size_dict(size, default_to_square=False) resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size) do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True.") if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") # PIL RGBA images are converted to RGB if do_convert_rgb: images = [convert_to_rgb(image) for image in images] # All transformations expect numpy arrays. images = [to_numpy_array(image) for image in images] if do_resize: images = [self.resize(image=image, size=size, resample=resample) for image in images] if do_center_crop: images = [self.center_crop(image=image, size=crop_size) for image in images] if do_rescale: images = [self.rescale(image=image, scale=rescale_factor) for image in images] if do_normalize: images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images] images = [to_channel_dimension_format(image, data_format) for image in images] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors)
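A minimal usage sketch of the processor defined above; the random image is only a stand-in for real input, and the size dict is passed explicitly with `height`/`width` keys because the `resize` method shown here indexes `size["height"]` and `size["width"]` rather than the `shortest_edge` default:

```python
import numpy as np
from PIL import Image
from transformers import ChineseCLIPImageProcessor

# Explicit height/width so resize() finds the keys it expects.
processor = ChineseCLIPImageProcessor(size={"height": 224, "width": 224})
image = Image.fromarray((np.random.rand(480, 640, 3) * 255).astype(np.uint8))

batch = processor.preprocess(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # expected: (1, 3, 224, 224)
```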
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing a torch tensor using `//` is deprecated and has to be done via `torch.div`.
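As a hedged illustration of the substitution this description refers to, here is a minimal sketch; the tensor values and variable names are invented for the example and do not come from the PR's diff:

```python
import torch

seq_len = torch.tensor([10, 17, 23])
block_size = 4

# Deprecated: floor division on a tensor via the `//` operator emits the warning above.
num_blocks_old = seq_len // block_size

# Replacement: explicit floor division through torch.div.
num_blocks_new = torch.div(seq_len, block_size, rounding_mode="floor")

assert torch.equal(num_blocks_old, num_blocks_new)  # both are tensor([2, 4, 5])
```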
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing a torch tensor using `//` is deprecated and has to be done via `torch.div`.
./examples/research_projects/rag/test_distributed_retriever.py
import json import os import shutil import sys import tempfile import unittest from unittest import TestCase from unittest.mock import patch import faiss import numpy as np from datasets import Dataset from transformers import BartConfig, BartTokenizer, DPRConfig, DPRQuestionEncoderTokenizer, RagConfig from transformers.file_utils import is_datasets_available, is_faiss_available, is_psutil_available, is_torch_available from transformers.integrations import is_ray_available from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_ray sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # noqa: E402 # isort:skip if is_torch_available(): from distributed_pytorch_retriever import RagPyTorchDistributedRetriever # noqa: E402 # isort:skip else: RagPyTorchDistributedRetriever = None if is_ray_available(): import ray # noqa: E402 # isort:skip from distributed_ray_retriever import RagRayDistributedRetriever, RayRetriever # noqa: E402 # isort:skip else: ray = None RagRayDistributedRetriever = None RayRetriever = None def require_distributed_retrieval(test_case): """ Decorator marking a test that requires a set of dependencies necessary for pefrorm retrieval with :class:`~transformers.RagRetriever`. These tests are skipped when respective libraries are not installed. """ if not (is_datasets_available() and is_faiss_available() and is_psutil_available()): test_case = unittest.skip("test requires Datasets, Faiss, psutil")(test_case) return test_case @require_distributed_retrieval class RagRetrieverTest(TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() self.retrieval_vector_size = 8 # DPR tok vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer") os.makedirs(dpr_tokenizer_path, exist_ok=True) self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) # BART tok vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] self.special_tokens_map = {"unk_token": "<unk>"} bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer") os.makedirs(bart_tokenizer_path, exist_ok=True) self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer")) def get_bart_tokenizer(self) -> BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer")) def tearDown(self): 
shutil.rmtree(self.tmpdirname) def get_dummy_dataset(self): dataset = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)], } ) dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT) return dataset def get_dummy_pytorch_distributed_retriever( self, init_retrieval: bool, port=12345 ) -> RagPyTorchDistributedRetriever: dataset = self.get_dummy_dataset() config = RagConfig( retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), ) with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset: mock_load_dataset.return_value = dataset retriever = RagPyTorchDistributedRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), ) if init_retrieval: retriever.init_retrieval(port) return retriever def get_dummy_ray_distributed_retriever(self, init_retrieval: bool) -> RagRayDistributedRetriever: # Have to run in local mode because sys.path modifications at top of # file are not propogated to remote workers. # https://stackoverflow.com/questions/54338013/parallel-import-a-python-file-from-sibling-folder ray.init(local_mode=True) config = RagConfig( retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), ) remote_cls = ray.remote(RayRetriever) workers = [remote_cls.remote() for _ in range(1)] with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset: mock_load_dataset.return_value = self.get_dummy_dataset() retriever = RagRayDistributedRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), retrieval_workers=workers, ) if init_retrieval: retriever.init_retrieval() return retriever def get_dummy_custom_hf_index_pytorch_retriever(self, init_retrieval: bool, from_disk: bool, port=12345): dataset = self.get_dummy_dataset() config = RagConfig( retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="custom", ) if from_disk: config.passages_path = os.path.join(self.tmpdirname, "dataset") config.index_path = os.path.join(self.tmpdirname, "index.faiss") dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss")) dataset.drop_index("embeddings") dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset")) del dataset retriever = RagPyTorchDistributedRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), ) else: retriever = RagPyTorchDistributedRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), index=CustomHFIndex(config.retrieval_vector_size, dataset), ) if init_retrieval: retriever.init_retrieval(port) return retriever def get_dummy_custom_hf_index_ray_retriever(self, init_retrieval: bool, from_disk: bool): # Have to run in local mode because sys.path modifications at top of # file are not propogated to remote workers. 
# https://stackoverflow.com/questions/54338013/parallel-import-a-python-file-from-sibling-folder ray.init(local_mode=True) dataset = self.get_dummy_dataset() config = RagConfig( retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="custom", ) remote_cls = ray.remote(RayRetriever) workers = [remote_cls.remote() for _ in range(1)] if from_disk: config.passages_path = os.path.join(self.tmpdirname, "dataset") config.index_path = os.path.join(self.tmpdirname, "index.faiss") dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss")) dataset.drop_index("embeddings") dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset")) del dataset retriever = RagRayDistributedRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), retrieval_workers=workers, index=CustomHFIndex.load_from_disk( vector_size=config.retrieval_vector_size, dataset_path=config.passages_path, index_path=config.index_path, ), ) else: retriever = RagRayDistributedRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), retrieval_workers=workers, index=CustomHFIndex(config.retrieval_vector_size, dataset), ) if init_retrieval: retriever.init_retrieval() return retriever def distributed_retriever_check(self, retriever: RagRetriever, hidden_states: np.array, n_docs: int) -> None: retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(doc_dicts), 2) self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"]) self.assertEqual(len(doc_dicts[0]["id"]), n_docs) self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist(), [[1], [0]]) def test_pytorch_distributed_retriever_retrieve(self): n_docs = 1 hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) self.distributed_retriever_check( self.get_dummy_pytorch_distributed_retriever(init_retrieval=True), hidden_states, n_docs ) def test_custom_hf_index_pytorch_retriever_retrieve(self): n_docs = 1 hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) self.distributed_retriever_check( self.get_dummy_custom_hf_index_pytorch_retriever(init_retrieval=True, from_disk=False), hidden_states, n_docs, ) def test_custom_pytorch_distributed_retriever_retrieve_from_disk(self): n_docs = 1 hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) self.distributed_retriever_check( self.get_dummy_custom_hf_index_pytorch_retriever(init_retrieval=True, from_disk=True), hidden_states, n_docs, ) @require_ray def test_ray_distributed_retriever_retrieve(self): n_docs = 1 hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) self.distributed_retriever_check( self.get_dummy_ray_distributed_retriever(init_retrieval=True), hidden_states, n_docs ) ray.shutdown() @require_ray def test_custom_hf_index_ray_retriever_retrieve(self): n_docs = 1 hidden_states = np.array( [np.ones(self.retrieval_vector_size), 
-np.ones(self.retrieval_vector_size)], dtype=np.float32 ) with self.assertRaises(ValueError): self.distributed_retriever_check( self.get_dummy_custom_hf_index_ray_retriever(init_retrieval=True, from_disk=False), hidden_states, n_docs, ) ray.shutdown() @require_ray def test_custom_ray_distributed_retriever_retrieve_from_disk(self): n_docs = 1 hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) self.distributed_retriever_check( self.get_dummy_custom_hf_index_ray_retriever(init_retrieval=True, from_disk=True), hidden_states, n_docs ) ray.shutdown()
import json import os import shutil import sys import tempfile import unittest from unittest import TestCase from unittest.mock import patch import faiss import numpy as np from datasets import Dataset from transformers import BartConfig, BartTokenizer, DPRConfig, DPRQuestionEncoderTokenizer, RagConfig from transformers.file_utils import is_datasets_available, is_faiss_available, is_psutil_available, is_torch_available from transformers.integrations import is_ray_available from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES from transformers.testing_utils import require_ray sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # noqa: E402 # isort:skip if is_torch_available(): from distributed_pytorch_retriever import RagPyTorchDistributedRetriever # noqa: E402 # isort:skip else: RagPyTorchDistributedRetriever = None if is_ray_available(): import ray # noqa: E402 # isort:skip from distributed_ray_retriever import RagRayDistributedRetriever, RayRetriever # noqa: E402 # isort:skip else: ray = None RagRayDistributedRetriever = None RayRetriever = None def require_distributed_retrieval(test_case): """ Decorator marking a test that requires a set of dependencies necessary for pefrorm retrieval with :class:`~transformers.RagRetriever`. These tests are skipped when respective libraries are not installed. """ if not (is_datasets_available() and is_faiss_available() and is_psutil_available()): test_case = unittest.skip("test requires Datasets, Faiss, psutil")(test_case) return test_case @require_distributed_retrieval class RagRetrieverTest(TestCase): def setUp(self): self.tmpdirname = tempfile.mkdtemp() self.retrieval_vector_size = 8 # DPR tok vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer") os.makedirs(dpr_tokenizer_path, exist_ok=True) self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) # BART tok vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] self.special_tokens_map = {"unk_token": "<unk>"} bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer") os.makedirs(bart_tokenizer_path, exist_ok=True) self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer: return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer")) def get_bart_tokenizer(self) -> BartTokenizer: return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer")) def tearDown(self): 
shutil.rmtree(self.tmpdirname) def get_dummy_dataset(self): dataset = Dataset.from_dict( { "id": ["0", "1"], "text": ["foo", "bar"], "title": ["Foo", "Bar"], "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)], } ) dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT) return dataset def get_dummy_pytorch_distributed_retriever( self, init_retrieval: bool, port=12345 ) -> RagPyTorchDistributedRetriever: dataset = self.get_dummy_dataset() config = RagConfig( retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), ) with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset: mock_load_dataset.return_value = dataset retriever = RagPyTorchDistributedRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), ) if init_retrieval: retriever.init_retrieval(port) return retriever def get_dummy_ray_distributed_retriever(self, init_retrieval: bool) -> RagRayDistributedRetriever: # Have to run in local mode because sys.path modifications at top of # file are not propogated to remote workers. # https://stackoverflow.com/questions/54338013/parallel-import-a-python-file-from-sibling-folder ray.init(local_mode=True) config = RagConfig( retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), ) remote_cls = ray.remote(RayRetriever) workers = [remote_cls.remote() for _ in range(1)] with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset: mock_load_dataset.return_value = self.get_dummy_dataset() retriever = RagRayDistributedRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), retrieval_workers=workers, ) if init_retrieval: retriever.init_retrieval() return retriever def get_dummy_custom_hf_index_pytorch_retriever(self, init_retrieval: bool, from_disk: bool, port=12345): dataset = self.get_dummy_dataset() config = RagConfig( retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="custom", ) if from_disk: config.passages_path = os.path.join(self.tmpdirname, "dataset") config.index_path = os.path.join(self.tmpdirname, "index.faiss") dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss")) dataset.drop_index("embeddings") dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset")) del dataset retriever = RagPyTorchDistributedRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), ) else: retriever = RagPyTorchDistributedRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), index=CustomHFIndex(config.retrieval_vector_size, dataset), ) if init_retrieval: retriever.init_retrieval(port) return retriever def get_dummy_custom_hf_index_ray_retriever(self, init_retrieval: bool, from_disk: bool): # Have to run in local mode because sys.path modifications at top of # file are not propogated to remote workers. 
# https://stackoverflow.com/questions/54338013/parallel-import-a-python-file-from-sibling-folder ray.init(local_mode=True) dataset = self.get_dummy_dataset() config = RagConfig( retrieval_vector_size=self.retrieval_vector_size, question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict(), index_name="custom", ) remote_cls = ray.remote(RayRetriever) workers = [remote_cls.remote() for _ in range(1)] if from_disk: config.passages_path = os.path.join(self.tmpdirname, "dataset") config.index_path = os.path.join(self.tmpdirname, "index.faiss") dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss")) dataset.drop_index("embeddings") dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset")) del dataset retriever = RagRayDistributedRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), retrieval_workers=workers, index=CustomHFIndex.load_from_disk( vector_size=config.retrieval_vector_size, dataset_path=config.passages_path, index_path=config.index_path, ), ) else: retriever = RagRayDistributedRetriever( config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer(), retrieval_workers=workers, index=CustomHFIndex(config.retrieval_vector_size, dataset), ) if init_retrieval: retriever.init_retrieval() return retriever def distributed_retriever_check(self, retriever: RagRetriever, hidden_states: np.array, n_docs: int) -> None: retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states, n_docs=n_docs) self.assertEqual(retrieved_doc_embeds.shape, (2, n_docs, self.retrieval_vector_size)) self.assertEqual(len(doc_dicts), 2) self.assertEqual(sorted(doc_dicts[0]), ["embeddings", "id", "text", "title"]) self.assertEqual(len(doc_dicts[0]["id"]), n_docs) self.assertEqual(doc_dicts[0]["id"][0], "1") # max inner product is reached with second doc self.assertEqual(doc_dicts[1]["id"][0], "0") # max inner product is reached with first doc self.assertListEqual(doc_ids.tolist(), [[1], [0]]) def test_pytorch_distributed_retriever_retrieve(self): n_docs = 1 hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) self.distributed_retriever_check( self.get_dummy_pytorch_distributed_retriever(init_retrieval=True), hidden_states, n_docs ) def test_custom_hf_index_pytorch_retriever_retrieve(self): n_docs = 1 hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) self.distributed_retriever_check( self.get_dummy_custom_hf_index_pytorch_retriever(init_retrieval=True, from_disk=False), hidden_states, n_docs, ) def test_custom_pytorch_distributed_retriever_retrieve_from_disk(self): n_docs = 1 hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) self.distributed_retriever_check( self.get_dummy_custom_hf_index_pytorch_retriever(init_retrieval=True, from_disk=True), hidden_states, n_docs, ) @require_ray def test_ray_distributed_retriever_retrieve(self): n_docs = 1 hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) self.distributed_retriever_check( self.get_dummy_ray_distributed_retriever(init_retrieval=True), hidden_states, n_docs ) ray.shutdown() @require_ray def test_custom_hf_index_ray_retriever_retrieve(self): n_docs = 1 hidden_states = np.array( [np.ones(self.retrieval_vector_size), 
-np.ones(self.retrieval_vector_size)], dtype=np.float32 ) with self.assertRaises(ValueError): self.distributed_retriever_check( self.get_dummy_custom_hf_index_ray_retriever(init_retrieval=True, from_disk=False), hidden_states, n_docs, ) ray.shutdown() @require_ray def test_custom_ray_distributed_retriever_retrieve_from_disk(self): n_docs = 1 hidden_states = np.array( [np.ones(self.retrieval_vector_size), -np.ones(self.retrieval_vector_size)], dtype=np.float32 ) self.distributed_retriever_check( self.get_dummy_custom_hf_index_ray_retriever(init_retrieval=True, from_disk=True), hidden_states, n_docs ) ray.shutdown()
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing a torch tensor using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing a torch tensor using `//` is deprecated and has to be done via `torch.div`.
./src/transformers/models/gpt2/tokenization_gpt2_tf.py
import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpt2 import GPT2Tokenizer class TFGPT2Tokenizer(tf.keras.layers.Layer): """ This is an in-graph tokenizer for GPT2. It should be initialized similarly to other tokenizers, using the `from_pretrained()` method. It can also be initialized with the `from_tokenizer()` method, which imports settings from an existing standard tokenizer object. In-graph tokenizers, unlike other Hugging Face tokenizers, are actually Keras layers and are designed to be run when the model is called, rather than during preprocessing. As a result, they have somewhat more limited options than standard tokenizer classes. They are most useful when you want to create an end-to-end model that goes straight from `tf.string` inputs to outputs. Args: vocab (Dict[str, int]): Vocabulary dict for Byte Pair Tokenizer merges (List[str]): Merges list for Byte Pair Tokenizer """ def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None): super().__init__() self.pad_token_id = pad_token_id self.max_length = max_length self.vocab = vocab self.merges = merges self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length) @classmethod def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs): """Creates TFGPT2Tokenizer from GPT2Tokenizer Args: tokenizer (GPT2Tokenizer) Examples: ```python from transformers import AutoTokenizer, TFGPT2Tokenizer tokenizer = AutoTokenizer.from_pretrained("gpt2") tf_tokenizer = TFGPT2Tokenizer.from_tokenizer(tokenizer) ``` """ merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()] vocab = tokenizer.get_vocab() return cls(vocab, merges, *args, **kwargs) @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs): """Creates TFGPT2Tokenizer from pretrained GPT2Tokenizer Args: pretrained_model_name_or_path (Union[str, os.PathLike]): Path to pretrained model Examples: ```python from transformers import TFGPT2Tokenizer tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2") ``` """ tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs) return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs) @classmethod def from_config(cls, config): """Creates TFGPT2Tokenizer from configurations Args: config (Dict): Dictionary with keys such as stated in `get_config`. """ return cls(**config) def get_config(self): return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def call(self, x, max_length: int = None): input_ids = self.tf_tokenizer(x) attention_mask = tf.ones_like(input_ids) if self.pad_token_id is not None: # pad the tokens up to max length max_length = max_length if max_length is not None else self.max_length if max_length is not None: input_ids, attention_mask = pad_model_inputs( input_ids, max_seq_length=max_length, pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpt2 import GPT2Tokenizer class TFGPT2Tokenizer(tf.keras.layers.Layer): """ This is an in-graph tokenizer for GPT2. It should be initialized similarly to other tokenizers, using the `from_pretrained()` method. It can also be initialized with the `from_tokenizer()` method, which imports settings from an existing standard tokenizer object. In-graph tokenizers, unlike other Hugging Face tokenizers, are actually Keras layers and are designed to be run when the model is called, rather than during preprocessing. As a result, they have somewhat more limited options than standard tokenizer classes. They are most useful when you want to create an end-to-end model that goes straight from `tf.string` inputs to outputs. Args: vocab (Dict[str, int]): Vocabulary dict for Byte Pair Tokenizer merges (List[str]): Merges list for Byte Pair Tokenizer """ def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None): super().__init__() self.pad_token_id = pad_token_id self.max_length = max_length self.vocab = vocab self.merges = merges self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length) @classmethod def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs): """Creates TFGPT2Tokenizer from GPT2Tokenizer Args: tokenizer (GPT2Tokenizer) Examples: ```python from transformers import AutoTokenizer, TFGPT2Tokenizer tokenizer = AutoTokenizer.from_pretrained("gpt2") tf_tokenizer = TFGPT2Tokenizer.from_tokenizer(tokenizer) ``` """ merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()] vocab = tokenizer.get_vocab() return cls(vocab, merges, *args, **kwargs) @classmethod def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs): """Creates TFGPT2Tokenizer from pretrained GPT2Tokenizer Args: pretrained_model_name_or_path (Union[str, os.PathLike]): Path to pretrained model Examples: ```python from transformers import TFGPT2Tokenizer tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2") ``` """ tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs) return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs) @classmethod def from_config(cls, config): """Creates TFGPT2Tokenizer from configurations Args: config (Dict): Dictionary with keys such as stated in `get_config`. """ return cls(**config) def get_config(self): return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def call(self, x, max_length: int = None): input_ids = self.tf_tokenizer(x) attention_mask = tf.ones_like(input_ids) if self.pad_token_id is not None: # pad the tokens up to max length max_length = max_length if max_length is not None else self.max_length if max_length is not None: input_ids, attention_mask = pad_model_inputs( input_ids, max_seq_length=max_length, pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
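A brief, hedged usage sketch for the in-graph tokenizer above (assumes the `keras_nlp` and `tensorflow_text` extras are installed; the input strings are arbitrary):

```python
import tensorflow as tf
from transformers import GPT2Tokenizer, TFGPT2Tokenizer

# Build the Keras-layer tokenizer from a standard GPT-2 tokenizer.
slow_tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
tf_tokenizer = TFGPT2Tokenizer.from_tokenizer(slow_tokenizer, pad_token_id=slow_tokenizer.eos_token_id)

# The layer runs inside the TF graph, so raw strings can feed an end-to-end model directly.
batch = tf_tokenizer(tf.constant(["hello world", "in-graph tokenization"]), max_length=16)
print(batch["input_ids"].shape, batch["attention_mask"].shape)  # padded to length 16
```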
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing a torch tensor using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing a torch tensor using `//` is deprecated and has to be done via `torch.div`.
./tests/models/efficientnet/test_modeling_efficientnet.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch EfficientNet model. """ import inspect import unittest from transformers import EfficientNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor if is_torch_available(): import torch from transformers import EfficientNetForImageClassification, EfficientNetModel from transformers.models.efficientnet.modeling_efficientnet import EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class EfficientNetModelTester: def __init__( self, parent, batch_size=13, image_size=32, num_channels=3, kernel_sizes=[3, 3, 5], in_channels=[32, 16, 24], out_channels=[16, 24, 40], strides=[1, 1, 2], num_block_repeats=[1, 1, 2], expand_ratios=[1, 6, 6], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.kernel_sizes = kernel_sizes self.in_channels = in_channels self.out_channels = out_channels self.strides = strides self.num_block_repeats = num_block_repeats self.expand_ratios = expand_ratios self.is_training = is_training self.hidden_act = hidden_act self.num_labels = num_labels self.use_labels = use_labels def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return EfficientNetConfig( num_channels=self.num_channels, kernel_sizes=self.kernel_sizes, in_channels=self.in_channels, out_channels=self.out_channels, strides=self.strides, num_block_repeats=self.num_block_repeats, expand_ratios=self.expand_ratios, hidden_act=self.hidden_act, num_labels=self.num_labels, ) def create_and_check_model(self, config, pixel_values, labels): model = EfficientNetModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # expected last hidden states: B, C, H // 4, W // 4 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, config.hidden_dim, self.image_size // 4, self.image_size // 4), ) def create_and_check_for_image_classification(self, config, pixel_values, labels): model = EfficientNetForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, 
pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class EfficientNetModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as EfficientNet does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (EfficientNetModel, EfficientNetForImageClassification) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = EfficientNetModelTester(self) self.config_tester = ConfigTester( self, config_class=EfficientNetConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return @unittest.skip(reason="EfficientNet does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="EfficientNet does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="EfficientNet does not use feedforward chunking") def test_feed_forward_chunking(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states num_blocks = sum(config.num_block_repeats) * 4 self.assertEqual(len(hidden_states), num_blocks) # EfficientNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in 
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = EfficientNetModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class EfficientNetModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return AutoImageProcessor.from_pretrained("google/efficientnet-b7") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = EfficientNetForImageClassification.from_pretrained("google/efficientnet-b7").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([0.0001, 0.0002, 0.0002]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch EfficientNet model. """ import inspect import unittest from transformers import EfficientNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor if is_torch_available(): import torch from transformers import EfficientNetForImageClassification, EfficientNetModel from transformers.models.efficientnet.modeling_efficientnet import EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class EfficientNetModelTester: def __init__( self, parent, batch_size=13, image_size=32, num_channels=3, kernel_sizes=[3, 3, 5], in_channels=[32, 16, 24], out_channels=[16, 24, 40], strides=[1, 1, 2], num_block_repeats=[1, 1, 2], expand_ratios=[1, 6, 6], is_training=True, use_labels=True, intermediate_size=37, hidden_act="gelu", num_labels=10, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.kernel_sizes = kernel_sizes self.in_channels = in_channels self.out_channels = out_channels self.strides = strides self.num_block_repeats = num_block_repeats self.expand_ratios = expand_ratios self.is_training = is_training self.hidden_act = hidden_act self.num_labels = num_labels self.use_labels = use_labels def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return EfficientNetConfig( num_channels=self.num_channels, kernel_sizes=self.kernel_sizes, in_channels=self.in_channels, out_channels=self.out_channels, strides=self.strides, num_block_repeats=self.num_block_repeats, expand_ratios=self.expand_ratios, hidden_act=self.hidden_act, num_labels=self.num_labels, ) def create_and_check_model(self, config, pixel_values, labels): model = EfficientNetModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) # expected last hidden states: B, C, H // 4, W // 4 self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, config.hidden_dim, self.image_size // 4, self.image_size // 4), ) def create_and_check_for_image_classification(self, config, pixel_values, labels): model = EfficientNetForImageClassification(config) model.to(torch_device) model.eval() result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, 
pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class EfficientNetModelTest(ModelTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as EfficientNet does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (EfficientNetModel, EfficientNetForImageClassification) if is_torch_available() else () fx_compatible = False test_pruning = False test_resize_embeddings = False test_head_masking = False has_attentions = False def setUp(self): self.model_tester = EfficientNetModelTester(self) self.config_tester = ConfigTester( self, config_class=EfficientNetConfig, has_text_modality=False, hidden_size=37 ) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return @unittest.skip(reason="EfficientNet does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip(reason="EfficientNet does not support input and output embeddings") def test_model_common_attributes(self): pass @unittest.skip(reason="EfficientNet does not use feedforward chunking") def test_feed_forward_chunking(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states num_blocks = sum(config.num_block_repeats) * 4 self.assertEqual(len(hidden_states), num_blocks) # EfficientNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:]), [self.model_tester.image_size // 2, self.model_tester.image_size // 2], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_for_image_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in 
EFFICIENTNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = EfficientNetModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class EfficientNetModelIntegrationTest(unittest.TestCase): @cached_property def default_image_processor(self): return AutoImageProcessor.from_pretrained("google/efficientnet-b7") if is_vision_available() else None @slow def test_inference_image_classification_head(self): model = EfficientNetForImageClassification.from_pretrained("google/efficientnet-b7").to(torch_device) image_processor = self.default_image_processor image = prepare_img() inputs = image_processor(images=image, return_tensors="pt").to(torch_device) # forward pass with torch.no_grad(): outputs = model(**inputs) # verify the logits expected_shape = torch.Size((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor([0.0001, 0.0002, 0.0002]).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
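A minimal sketch of the substitution the description refers to (not part of this row's diff); the tensor and block size below are invented for illustration:

```python
import torch

# Made-up tensor standing in for e.g. position indices inside a model.
positions = torch.arange(10)
block_size = 3

# Deprecated pattern that triggers the __floordiv__ warning:
#     block_ids = positions // block_size
# Replacement with an explicit rounding mode, as the PR description suggests:
block_ids = torch.div(positions, block_size, rounding_mode="floor")
print(block_ids)  # tensor([0, 0, 0, 1, 1, 1, 2, 2, 2, 3])
```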
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/donut/configuration_donut_swin.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Donut Swin Transformer model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = { "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json", # See all Donut models at https://huggingface.co/models?filter=donut-swin } class DonutSwinConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`DonutSwinModel`]. It is used to instantiate a Donut model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Donut [naver-clova-ix/donut-base](https://huggingface.co/naver-clova-ix/donut-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 4): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. embed_dim (`int`, *optional*, defaults to 96): Dimensionality of patch embedding. depths (`list(int)`, *optional*, defaults to [2, 2, 6, 2]): Depth of each layer in the Transformer encoder. num_heads (`list(int)`, *optional*, defaults to [3, 6, 12, 24]): Number of attention heads in each layer of the Transformer encoder. window_size (`int`, *optional*, defaults to 7): Size of windows. mlp_ratio (`float`, *optional*, defaults to 4.0): Ratio of MLP hidden dimensionality to embedding dimensionality. qkv_bias (`bool`, *optional*, defaults to True): Whether or not a learnable bias should be added to the queries, keys and values. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings and encoder. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. drop_path_rate (`float`, *optional*, defaults to 0.1): Stochastic depth rate. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. use_absolute_embeddings (`bool`, *optional*, defaults to False): Whether or not to add absolute position embeddings to the patch embeddings. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. 
Example: ```python >>> from transformers import DonutSwinConfig, DonutSwinModel >>> # Initializing a Donut naver-clova-ix/donut-base style configuration >>> configuration = DonutSwinConfig() >>> # Randomly initializing a model from the naver-clova-ix/donut-base style configuration >>> model = DonutSwinModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "donut-swin" attribute_map = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, **kwargs, ): super().__init__(**kwargs) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.depths = depths self.num_layers = len(depths) self.num_heads = num_heads self.window_size = window_size self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.use_absolute_embeddings = use_absolute_embeddings self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Donut Swin Transformer model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) DONUT_SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = { "naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json", # See all Donut models at https://huggingface.co/models?filter=donut-swin } class DonutSwinConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`DonutSwinModel`]. It is used to instantiate a Donut model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the Donut [naver-clova-ix/donut-base](https://huggingface.co/naver-clova-ix/donut-base) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: image_size (`int`, *optional*, defaults to 224): The size (resolution) of each image. patch_size (`int`, *optional*, defaults to 4): The size (resolution) of each patch. num_channels (`int`, *optional*, defaults to 3): The number of input channels. embed_dim (`int`, *optional*, defaults to 96): Dimensionality of patch embedding. depths (`list(int)`, *optional*, defaults to [2, 2, 6, 2]): Depth of each layer in the Transformer encoder. num_heads (`list(int)`, *optional*, defaults to [3, 6, 12, 24]): Number of attention heads in each layer of the Transformer encoder. window_size (`int`, *optional*, defaults to 7): Size of windows. mlp_ratio (`float`, *optional*, defaults to 4.0): Ratio of MLP hidden dimensionality to embedding dimensionality. qkv_bias (`bool`, *optional*, defaults to True): Whether or not a learnable bias should be added to the queries, keys and values. hidden_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout probability for all fully connected layers in the embeddings and encoder. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. drop_path_rate (`float`, *optional*, defaults to 0.1): Stochastic depth rate. hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported. use_absolute_embeddings (`bool`, *optional*, defaults to False): Whether or not to add absolute position embeddings to the patch embeddings. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. 
Example: ```python >>> from transformers import DonutSwinConfig, DonutSwinModel >>> # Initializing a Donut naver-clova-ix/donut-base style configuration >>> configuration = DonutSwinConfig() >>> # Randomly initializing a model from the naver-clova-ix/donut-base style configuration >>> model = DonutSwinModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "donut-swin" attribute_map = { "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, **kwargs, ): super().__init__(**kwargs) self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.depths = depths self.num_layers = len(depths) self.num_heads = num_heads self.window_size = window_size self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.use_absolute_embeddings = use_absolute_embeddings self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
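As a side note on the `hidden_size` assignment at the end of the configuration above, a small sketch of the arithmetic with the default values shown (`embed_dim=96`, `depths=[2, 2, 6, 2]`); the standalone variables are only for illustration:

```python
embed_dim = 96
depths = [2, 2, 6, 2]

# The channel dimension doubles after each of the len(depths) - 1 downsampling
# stages, so the last stage ends with embed_dim * 2 ** (len(depths) - 1) channels.
hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
print(hidden_size)  # 768
```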
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./conftest.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. git_repo_path = abspath(join(dirname(__file__), "src")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="ignore", category=FutureWarning) def pytest_configure(config): config.addinivalue_line( "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" ) config.addinivalue_line( "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" ) config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment") def pytest_addoption(parser): from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(parser) def pytest_terminal_summary(terminalreporter): from transformers.testing_utils import pytest_terminal_summary_main make_reports = terminalreporter.config.getoption("--make-reports") if make_reports: pytest_terminal_summary_main(terminalreporter, id=make_reports) def pytest_sessionfinish(session, exitstatus): # If no tests are collected, pytest exists with code 5, which makes the CI fail. if exitstatus == 5: session.exitstatus = 0 # Doctest custom flag to ignore output. IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT') OutputChecker = doctest.OutputChecker class CustomOutputChecker(OutputChecker): def check_output(self, want, got, optionflags): if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self, want, got, optionflags) doctest.OutputChecker = CustomOutputChecker
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import doctest import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. git_repo_path = abspath(join(dirname(__file__), "src")) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action="ignore", category=FutureWarning) def pytest_configure(config): config.addinivalue_line( "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested" ) config.addinivalue_line( "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested" ) config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment") def pytest_addoption(parser): from transformers.testing_utils import pytest_addoption_shared pytest_addoption_shared(parser) def pytest_terminal_summary(terminalreporter): from transformers.testing_utils import pytest_terminal_summary_main make_reports = terminalreporter.config.getoption("--make-reports") if make_reports: pytest_terminal_summary_main(terminalreporter, id=make_reports) def pytest_sessionfinish(session, exitstatus): # If no tests are collected, pytest exists with code 5, which makes the CI fail. if exitstatus == 5: session.exitstatus = 0 # Doctest custom flag to ignore output. IGNORE_RESULT = doctest.register_optionflag('IGNORE_RESULT') OutputChecker = doctest.OutputChecker class CustomOutputChecker(OutputChecker): def check_output(self, want, got, optionflags): if IGNORE_RESULT & optionflags: return True return OutputChecker.check_output(self, want, got, optionflags) doctest.OutputChecker = CustomOutputChecker
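For reference, a hedged sketch of how a docstring could opt into the `IGNORE_RESULT` flag registered in the conftest above; the function is hypothetical and only illustrates the directive syntax:

```python
import time


def current_timestamp():
    """
    >>> current_timestamp()  # doctest: +IGNORE_RESULT
    1234567890.0
    """
    # The custom output checker accepts any output when the flag is set, so the
    # doctest passes even though time.time() never matches the literal above.
    return time.time()
```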
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/gpt2/modeling_tf_gpt2.py
# coding=utf-8 # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 OpenAI GPT-2 model.""" from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutputWithPastAndCrossAttentions, TFCausalLMOutputWithCrossAttentions, TFSequenceClassifierOutputWithPast, ) from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFConv1D, TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, TFSequenceSummary, TFSharedEmbeddings, get_initializer, keras_serializable, unpack_inputs, ) from ...tf_utils import shape_list, stable_softmax from ...utils import ( DUMMY_INPUTS, ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_gpt2 import GPT2Config logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "gpt2" _CONFIG_FOR_DOC = "GPT2Config" TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = [ "gpt2", "gpt2-medium", "gpt2-large", "gpt2-xl", "distilgpt2", # See all GPT-2 models at https://huggingface.co/models?filter=gpt2 ] class TFAttention(tf.keras.layers.Layer): def __init__(self, nx, config, scale=False, is_cross_attention=False, **kwargs): super().__init__(**kwargs) n_state = nx # in Attention: n_state=768 (nx=n_embd) # [switch nx => n_state from Block to Attention to keep identical to TF implementation] assert n_state % config.n_head == 0 self.n_head = config.n_head self.split_size = n_state self.scale = scale self.output_attentions = config.output_attentions self.is_cross_attention = is_cross_attention if self.is_cross_attention: self.c_attn = TFConv1D(n_state * 2, nx, initializer_range=config.initializer_range, name="c_attn") self.q_attn = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="q_attn") else: self.c_attn = TFConv1D(n_state * 3, nx, initializer_range=config.initializer_range, name="c_attn") self.c_proj = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_proj") self.attn_dropout = tf.keras.layers.Dropout(config.attn_pdrop) self.resid_dropout = tf.keras.layers.Dropout(config.resid_pdrop) self.pruned_heads = set() def prune_heads(self, heads): pass @staticmethod def causal_attention_mask(nd, ns, dtype): """ 1's in the lower triangle, counting from the lower right corner. Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs. 
""" i = tf.range(nd)[:, None] j = tf.range(ns) m = i >= j - ns + nd return tf.cast(m, dtype) def _attn(self, q, k, v, attention_mask, head_mask, output_attentions, training=False): # q, k, v have shape [batch, heads, sequence, features] w = tf.matmul(q, k, transpose_b=True) if self.scale: dk = tf.cast(shape_list(k)[-1], dtype=w.dtype) # scale attention_scores w = w / tf.math.sqrt(dk) if not self.is_cross_attention: # if only "normal" attention layer implements causal mask # w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst. _, _, nd, ns = shape_list(w) b = self.causal_attention_mask(nd, ns, dtype=w.dtype) b = tf.reshape(b, [1, 1, nd, ns]) w = w * b - 1e4 * (1 - b) if attention_mask is not None: # Apply the attention mask attention_mask = tf.cast(attention_mask, dtype=w.dtype) w = w + attention_mask w = stable_softmax(w, axis=-1) w = self.attn_dropout(w, training=training) # Mask heads if we want to if head_mask is not None: w = w * head_mask outputs = [tf.matmul(w, v)] if output_attentions: outputs.append(w) return outputs def merge_heads(self, x): x = tf.transpose(x, [0, 2, 1, 3]) x_shape = shape_list(x) new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]] return tf.reshape(x, new_x_shape) def split_heads(self, x): x_shape = shape_list(x) new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head] x = tf.reshape(x, new_x_shape) return tf.transpose(x, (0, 2, 1, 3)) # (batch, head, seq_length, head_features) def call( self, x, layer_past, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, use_cache, output_attentions, training=False, ): if encoder_hidden_states is not None: if not hasattr(self, "q_attn"): raise ValueError( "If class is used as cross attention, the weights `q_attn` have to be defined. " "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`." 
) query = self.q_attn(x) kv_out = self.c_attn(encoder_hidden_states) key, value = tf.split(kv_out, 2, axis=2) attention_mask = encoder_attention_mask else: x = self.c_attn(x) query, key, value = tf.split(x, 3, axis=2) query = self.split_heads(query) key = self.split_heads(key) value = self.split_heads(value) if layer_past is not None: past_key, past_value = tf.unstack(layer_past, axis=0, num=2) key = tf.concat([past_key, key], axis=-2) value = tf.concat([past_value, value], axis=-2) # to cope with keras serialization if use_cache: present = tf.stack([key, value], axis=0) else: present = (None,) attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions, training=training) a = attn_outputs[0] a = self.merge_heads(a) a = self.c_proj(a) a = self.resid_dropout(a, training=training) outputs = [a, present] + attn_outputs[1:] return outputs # a, present, (attentions) class TFMLP(tf.keras.layers.Layer): def __init__(self, n_state, config, **kwargs): super().__init__(**kwargs) nx = config.n_embd self.c_fc = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_fc") self.c_proj = TFConv1D(nx, n_state, initializer_range=config.initializer_range, name="c_proj") self.act = get_tf_activation(config.activation_function) self.dropout = tf.keras.layers.Dropout(config.resid_pdrop) def call(self, x, training=False): h = self.act(self.c_fc(x)) h2 = self.c_proj(h) h2 = self.dropout(h2, training=training) return h2 class TFBlock(tf.keras.layers.Layer): def __init__(self, config, scale=False, **kwargs): super().__init__(**kwargs) nx = config.n_embd inner_dim = config.n_inner if config.n_inner is not None else 4 * nx self.ln_1 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_1") self.attn = TFAttention(nx, config, scale, name="attn") self.ln_2 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_2") if config.add_cross_attention: self.crossattention = TFAttention(nx, config, scale, name="crossattention", is_cross_attention=True) self.ln_cross_attn = tf.keras.layers.LayerNormalization( epsilon=config.layer_norm_epsilon, name="ln_cross_attn" ) self.mlp = TFMLP(inner_dim, config, name="mlp") def call( self, x, layer_past, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, use_cache, output_attentions, training=False, ): a = self.ln_1(x) output_attn = self.attn( a, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=None, encoder_attention_mask=None, use_cache=use_cache, output_attentions=output_attentions, training=training, ) a = output_attn[0] # output_attn: a, present, (attentions) outputs = output_attn[1:] x = x + a # Cross-Attention Block if encoder_hidden_states is not None: # add one self-attention block for cross-attention if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with " "cross-attention layers by setting `config.add_cross_attention=True`" ) ca = self.ln_cross_attn(x) output_cross_attn = self.crossattention( ca, layer_past=None, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=False, output_attentions=output_attentions, training=training, ) ca = output_cross_attn[0] # output_attn: a, present, (cross_attentions) x = x + ca outputs = outputs + output_cross_attn[2:] # add cross attentions if we output attention weights m = self.ln_2(x) m = 
self.mlp(m, training=training) x = x + m outputs = [x] + outputs return outputs # x, present, (attentions, cross_attentions) @keras_serializable class TFGPT2MainLayer(tf.keras.layers.Layer): config_class = GPT2Config def __init__(self, config, *inputs, **kwargs): super().__init__(*inputs, **kwargs) self.config = config self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.use_cache = config.use_cache self.return_dict = config.use_return_dict self.num_hidden_layers = config.n_layer self.n_embd = config.n_embd self.n_positions = config.n_positions self.initializer_range = config.initializer_range self.wte = TFSharedEmbeddings( config.vocab_size, config.hidden_size, initializer_range=config.initializer_range, name="wte" ) self.drop = tf.keras.layers.Dropout(config.embd_pdrop) self.h = [TFBlock(config, scale=True, name=f"h_._{i}") for i in range(config.n_layer)] self.ln_f = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_f") def build(self, input_shape): with tf.name_scope("wpe"): self.wpe = self.add_weight( name="embeddings", shape=[self.n_positions, self.n_embd], initializer=get_initializer(self.initializer_range), ) super().build(input_shape) def get_input_embeddings(self): return self.wte def set_input_embeddings(self, value): self.wte.weight = value self.wte.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} """ raise NotImplementedError @unpack_inputs def call( self, input_ids: Optional[TFModelInputType] = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, encoder_hidden_states: Optional[Union[np.ndarray, tf.Tensor]] = None, encoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) input_ids = tf.reshape(input_ids, [-1, input_shape[-1]]) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if past_key_values is None: past_length = 0 past_key_values = [None] * len(self.h) else: past_length = shape_list(past_key_values[0][0])[-2] if position_ids is None: position_ids = tf.expand_dims(tf.range(past_length, input_shape[-1] + past_length), axis=0) if attention_mask is not None: # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. 
attention_mask_shape = shape_list(attention_mask) attention_mask = tf.reshape(attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1])) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. one_cst = tf.constant(1.0) attention_mask = tf.cast(attention_mask, dtype=one_cst.dtype) attention_mask = tf.multiply(tf.subtract(one_cst, attention_mask), tf.constant(-10000.0)) # Copied from `modeling_tf_t5.py` with -1e9 -> -10000 if self.config.add_cross_attention and encoder_attention_mask is not None: # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length] # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=encoder_hidden_states.dtype) num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask)) if num_dims_encoder_attention_mask == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if num_dims_encoder_attention_mask == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask, # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2))) encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 else: encoder_extended_attention_mask = None encoder_attention_mask = encoder_extended_attention_mask # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.num_hidden_layers # head_mask = tf.constant([0] * self.num_hidden_layers) position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) if inputs_embeds is None: # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
tf.debugging.assert_less( input_ids, tf.cast(self.config.vocab_size, dtype=input_ids.dtype), message=( "input_ids must be smaller than the embedding layer's input dimension (got" f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" ), ) inputs_embeds = self.wte(input_ids, mode="embedding") position_embeds = tf.gather(self.wpe, position_ids) if token_type_ids is not None: token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]]) token_type_embeds = self.wte(token_type_ids, mode="embedding") else: token_type_embeds = tf.constant(0.0) position_embeds = tf.cast(position_embeds, dtype=inputs_embeds.dtype) token_type_embeds = tf.cast(token_type_embeds, dtype=inputs_embeds.dtype) hidden_states = inputs_embeds + position_embeds + token_type_embeds hidden_states = self.drop(hidden_states, training=training) output_shape = input_shape + [shape_list(hidden_states)[-1]] presents = () if use_cache else None all_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None all_hidden_states = () if output_hidden_states else None for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),) outputs = block( hidden_states, layer_past, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask, use_cache, output_attentions, training=training, ) hidden_states, present = outputs[:2] if use_cache: presents = presents + (present,) if output_attentions: all_attentions = all_attentions + (outputs[2],) if self.config.add_cross_attention and encoder_hidden_states is not None: all_cross_attentions = all_cross_attentions + (outputs[3],) hidden_states = self.ln_f(hidden_states) hidden_states = tf.reshape(hidden_states, output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if output_attentions: # let the number of heads free (-1) so we can extract attention even after head pruning attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:] all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions) if not return_dict: return tuple( v for v in [hidden_states, presents, all_hidden_states, all_attentions, all_cross_attentions] if v is not None ) return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) class TFGPT2PreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = GPT2Config base_model_prefix = "transformer" # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"h.\d+.attn.bias", r"h.\d+.crossattention.bias"] @property def dummy_inputs(self): """ Dummy inputs to build the network. Returns: `Dict[str, tf.Tensor]`: The dummy inputs. 
""" dummy = {"input_ids": tf.constant(DUMMY_INPUTS, dtype=tf.int32)} # Add `encoder_hidden_states` to make the cross-attention layers' weights initialized if self.config.add_cross_attention: batch_size, seq_len = tf.constant(DUMMY_INPUTS).shape shape = (batch_size, seq_len) + (self.config.hidden_size,) h = tf.random.uniform(shape=shape) dummy["encoder_hidden_states"] = h return dummy @tf.function( input_signature=[ { "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), } ] ) def serving(self, inputs): output = self.call(inputs) return self.serving_output(output) @dataclass class TFGPT2DoubleHeadsModelOutput(ModelOutput): """ Base class for outputs of models predicting if two sentences are consecutive or not. Args: logits (`tf.Tensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). mc_logits (`tf.Tensor` of shape `(batch_size, num_choices)`): Prediction scores of the multiple choice classification head (scores for each choice before SoftMax). past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: tf.Tensor = None mc_logits: tf.Tensor = None past_key_values: Optional[List[tf.Tensor]] = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None GPT2_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. 
Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Parameters: config ([`GPT2Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ GPT2_INPUTS_DOCSTRING = r""" Args: input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0].shape[-2]` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only input IDs that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) past_key_values (`List[tf.Tensor]` of length `config.n_layers`): Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see `past_key_values` output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input ids as they have already been computed. attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. If `past_key_values` is used, `attention_mask` needs to contain the masking strategy that was used for `past_key_values`. In other words, the `attention_mask` always has to have the length: `len(past_key_values) + len(input_ids)` [What are attention masks?](../glossary#attention-mask) token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.", GPT2_START_DOCSTRING, ) class TFGPT2Model(TFGPT2PreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFGPT2MainLayer(config, name="transformer") @unpack_inputs @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: Optional[TFModelInputType] = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, encoder_hidden_states: Optional[Union[np.ndarray, tf.Tensor]] = None, encoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: r""" encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. 
This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past`). Set to `False` during training, `True` during generation """ outputs = self.transformer( input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def serving_output(self, output): pkv = tf.convert_to_tensor(output.past_key_values) if self.config.use_cache else None hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None cross_attns = ( tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions and self.config.add_cross_attention and output.cross_attentions is not None else None ) return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=output.last_hidden_state, past_key_values=pkv, hidden_states=hs, attentions=attns, cross_attentions=cross_attns, ) @add_start_docstrings( """ The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). 
""", GPT2_START_DOCSTRING, ) class TFGPT2LMHeadModel(TFGPT2PreTrainedModel, TFCausalLanguageModelingLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFGPT2MainLayer(config, name="transformer") def get_output_embeddings(self): return self.get_input_embeddings() def set_output_embeddings(self, value): self.set_input_embeddings(value) def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs): token_type_ids = kwargs.get("token_type_ids", None) # only last token for inputs_ids if past is defined in kwargs if past_key_values: inputs = tf.expand_dims(inputs[:, -1], -1) if token_type_ids is not None: token_type_ids = tf.expand_dims(token_type_ids[:, -1], -1) position_ids = kwargs.get("position_ids", None) attention_mask = kwargs.get("attention_mask", None) if attention_mask is not None and position_ids is None: position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True) if past_key_values: position_ids = tf.expand_dims(position_ids[:, -1], -1) return { "input_ids": inputs, "attention_mask": attention_mask, "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": use_cache, "token_type_ids": token_type_ids, } @unpack_inputs @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: Optional[TFModelInputType] = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, encoder_hidden_states: Optional[Union[np.ndarray, tf.Tensor]] = None, encoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[Union[np.ndarray, tf.Tensor]] = None, training: Optional[bool] = False, ) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]: r""" encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past`). Set to `False` during training, `True` during generation labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the cross entropy classification loss. Indices should be in `[0, ..., config.vocab_size - 1]`. """ transformer_outputs = self.transformer( input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) hidden_states = transformer_outputs[0] logits = self.transformer.wte(hidden_states, mode="linear") loss = None if labels is not None: # shift labels to the left and cut last logit token shifted_logits = logits[:, :-1] labels = labels[:, 1:] loss = self.hf_compute_loss(labels, shifted_logits) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFCausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, cross_attentions=transformer_outputs.cross_attentions, ) def serving_output(self, output): pkv = tf.convert_to_tensor(output.past_key_values) if self.config.use_cache else None hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None cross_attns = ( tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions and self.config.add_cross_attention and output.cross_attentions is not None else None ) return TFCausalLMOutputWithCrossAttentions( logits=output.logits, past_key_values=pkv, hidden_states=hs, attentions=attns, cross_attentions=cross_attns ) @add_start_docstrings( """ The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the input embeddings, the classification head takes as input the input of a specified classification token index in the input sequence). 
""", GPT2_START_DOCSTRING, ) class TFGPT2DoubleHeadsModel(TFGPT2PreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) config.num_labels = 1 self.transformer = TFGPT2MainLayer(config, name="transformer") self.multiple_choice_head = TFSequenceSummary( config, initializer_range=config.initializer_range, name="multiple_choice_head" ) @unpack_inputs @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFGPT2DoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: Optional[TFModelInputType] = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, mc_token_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFGPT2DoubleHeadsModelOutput, Tuple[tf.Tensor]]: r""" mc_token_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, num_choices)`, *optional*, default to index of the last token of the input): Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) - 1]`. Return: Examples: ```python >>> import tensorflow as tf >>> from transformers import AutoTokenizer, TFGPT2DoubleHeadsModel >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") >>> model = TFGPT2DoubleHeadsModel.from_pretrained("gpt2") >>> # Add a [CLS] to the vocabulary (we should train it also!) >>> num_added_tokens = tokenizer.add_special_tokens({"cls_token": "[CLS]"}) >>> embedding_layer = model.resize_token_embeddings( ... len(tokenizer) ... 
) # Update the model embeddings with the new vocabulary size >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"] >>> encoded_choices = [tokenizer.encode(s) for s in choices] >>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices] >>> input_ids = tf.constant(encoded_choices)[None, :] # Batch size: 1, number of choices: 2 >>> mc_token_ids = tf.constant([cls_token_location]) # Batch size: 1 >>> outputs = model(input_ids, mc_token_ids=mc_token_ids) >>> lm_prediction_scores, mc_prediction_scores = outputs[:2] ```""" if input_ids is not None: input_shapes = shape_list(input_ids) else: input_shapes = shape_list(inputs_embeds)[:-1] seq_length = input_shapes[-1] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None transformer_outputs = self.transformer( input_ids=flat_input_ids, past_key_values=past_key_values, attention_mask=flat_attention_mask, token_type_ids=flat_token_type_ids, position_ids=flat_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=None, encoder_attention_mask=None, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) hidden_states = transformer_outputs[0] hidden_states = tf.reshape(hidden_states, input_shapes + shape_list(hidden_states)[-1:]) lm_logits = self.transformer.wte(hidden_states, mode="linear") mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids, training=training) mc_logits = tf.squeeze(mc_logits, axis=-1) if not return_dict: return (lm_logits, mc_logits) + transformer_outputs[1:] return TFGPT2DoubleHeadsModelOutput( logits=lm_logits, mc_logits=mc_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @tf.function( input_signature=[ { "input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"), "attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"), "mc_token_ids": tf.TensorSpec((None, None), tf.int32, name="mc_token_ids"), } ] ) def serving(self, inputs): output = self.call(inputs) return self.serving_output(output) def serving_output(self, output): pkv = tf.convert_to_tensor(output.past_key_values) if self.config.use_cache else None hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFGPT2DoubleHeadsModelOutput( logits=output.logits, mc_logits=output.mc_logits, past_key_values=pkv, hidden_states=hs, attentions=attns, ) @add_start_docstrings( """ The GPT2 Model transformer with a sequence classification head on top (linear layer). [`TFGPT2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-1) do. Since it does classification on the last token, it requires to know the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. 
If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). """, GPT2_START_DOCSTRING, ) class TFGPT2ForSequenceClassification(TFGPT2PreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.score = tf.keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="score", use_bias=False, ) self.transformer = TFGPT2MainLayer(config, name="transformer") @unpack_inputs @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint="microsoft/DialogRPT-updown", output_type=TFSequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: Optional[TFModelInputType] = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[Union[np.ndarray, tf.Tensor]] = None, training: Optional[bool] = False, ) -> Union[TFSequenceClassifierOutputWithPast, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the cross entropy classification loss. Indices should be in `[0, ..., config.vocab_size - 1]`. """ transformer_outputs = self.transformer( input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) logits_shape = shape_list(logits) in_logits = None if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: sequence_lengths = ( tf.reduce_sum( tf.cast( tf.math.not_equal(input_ids, self.config.pad_token_id), dtype=input_ids.dtype, ), -1, keepdims=False, ) - 1 ) in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1) else: sequence_lengths = -1 logger.warning( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) loss = None if labels is not None: assert ( self.config.pad_token_id is not None or logits_shape[0] == 1 ), "Cannot handle batch sizes > 1 if no padding token is defined." 
if not tf.is_tensor(sequence_lengths): in_logits = logits[0 : logits_shape[0], sequence_lengths] loss = self.hf_compute_loss(tf.reshape(labels, [-1]), tf.reshape(in_logits, [-1, self.num_labels])) pooled_logits = in_logits if in_logits is not None else logits if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) def serving_output(self, output): pkv = tf.convert_to_tensor(output.past_key_values) if self.config.use_cache else None hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFSequenceClassifierOutputWithPast( logits=output.logits, past_key_values=pkv, hidden_states=hs, attentions=attns )
# coding=utf-8 # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 OpenAI GPT-2 model.""" from dataclasses import dataclass from typing import List, Optional, Tuple, Union import numpy as np import tensorflow as tf from ...activations_tf import get_tf_activation from ...modeling_tf_outputs import ( TFBaseModelOutputWithPastAndCrossAttentions, TFCausalLMOutputWithCrossAttentions, TFSequenceClassifierOutputWithPast, ) from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFConv1D, TFModelInputType, TFPreTrainedModel, TFSequenceClassificationLoss, TFSequenceSummary, TFSharedEmbeddings, get_initializer, keras_serializable, unpack_inputs, ) from ...tf_utils import shape_list, stable_softmax from ...utils import ( DUMMY_INPUTS, ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging, replace_return_docstrings, ) from .configuration_gpt2 import GPT2Config logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "gpt2" _CONFIG_FOR_DOC = "GPT2Config" TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST = [ "gpt2", "gpt2-medium", "gpt2-large", "gpt2-xl", "distilgpt2", # See all GPT-2 models at https://huggingface.co/models?filter=gpt2 ] class TFAttention(tf.keras.layers.Layer): def __init__(self, nx, config, scale=False, is_cross_attention=False, **kwargs): super().__init__(**kwargs) n_state = nx # in Attention: n_state=768 (nx=n_embd) # [switch nx => n_state from Block to Attention to keep identical to TF implementation] assert n_state % config.n_head == 0 self.n_head = config.n_head self.split_size = n_state self.scale = scale self.output_attentions = config.output_attentions self.is_cross_attention = is_cross_attention if self.is_cross_attention: self.c_attn = TFConv1D(n_state * 2, nx, initializer_range=config.initializer_range, name="c_attn") self.q_attn = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="q_attn") else: self.c_attn = TFConv1D(n_state * 3, nx, initializer_range=config.initializer_range, name="c_attn") self.c_proj = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_proj") self.attn_dropout = tf.keras.layers.Dropout(config.attn_pdrop) self.resid_dropout = tf.keras.layers.Dropout(config.resid_pdrop) self.pruned_heads = set() def prune_heads(self, heads): pass @staticmethod def causal_attention_mask(nd, ns, dtype): """ 1's in the lower triangle, counting from the lower right corner. Same as tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs. 
""" i = tf.range(nd)[:, None] j = tf.range(ns) m = i >= j - ns + nd return tf.cast(m, dtype) def _attn(self, q, k, v, attention_mask, head_mask, output_attentions, training=False): # q, k, v have shape [batch, heads, sequence, features] w = tf.matmul(q, k, transpose_b=True) if self.scale: dk = tf.cast(shape_list(k)[-1], dtype=w.dtype) # scale attention_scores w = w / tf.math.sqrt(dk) if not self.is_cross_attention: # if only "normal" attention layer implements causal mask # w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst. _, _, nd, ns = shape_list(w) b = self.causal_attention_mask(nd, ns, dtype=w.dtype) b = tf.reshape(b, [1, 1, nd, ns]) w = w * b - 1e4 * (1 - b) if attention_mask is not None: # Apply the attention mask attention_mask = tf.cast(attention_mask, dtype=w.dtype) w = w + attention_mask w = stable_softmax(w, axis=-1) w = self.attn_dropout(w, training=training) # Mask heads if we want to if head_mask is not None: w = w * head_mask outputs = [tf.matmul(w, v)] if output_attentions: outputs.append(w) return outputs def merge_heads(self, x): x = tf.transpose(x, [0, 2, 1, 3]) x_shape = shape_list(x) new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]] return tf.reshape(x, new_x_shape) def split_heads(self, x): x_shape = shape_list(x) new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head] x = tf.reshape(x, new_x_shape) return tf.transpose(x, (0, 2, 1, 3)) # (batch, head, seq_length, head_features) def call( self, x, layer_past, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, use_cache, output_attentions, training=False, ): if encoder_hidden_states is not None: if not hasattr(self, "q_attn"): raise ValueError( "If class is used as cross attention, the weights `q_attn` have to be defined. " "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`." 
) query = self.q_attn(x) kv_out = self.c_attn(encoder_hidden_states) key, value = tf.split(kv_out, 2, axis=2) attention_mask = encoder_attention_mask else: x = self.c_attn(x) query, key, value = tf.split(x, 3, axis=2) query = self.split_heads(query) key = self.split_heads(key) value = self.split_heads(value) if layer_past is not None: past_key, past_value = tf.unstack(layer_past, axis=0, num=2) key = tf.concat([past_key, key], axis=-2) value = tf.concat([past_value, value], axis=-2) # to cope with keras serialization if use_cache: present = tf.stack([key, value], axis=0) else: present = (None,) attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions, training=training) a = attn_outputs[0] a = self.merge_heads(a) a = self.c_proj(a) a = self.resid_dropout(a, training=training) outputs = [a, present] + attn_outputs[1:] return outputs # a, present, (attentions) class TFMLP(tf.keras.layers.Layer): def __init__(self, n_state, config, **kwargs): super().__init__(**kwargs) nx = config.n_embd self.c_fc = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_fc") self.c_proj = TFConv1D(nx, n_state, initializer_range=config.initializer_range, name="c_proj") self.act = get_tf_activation(config.activation_function) self.dropout = tf.keras.layers.Dropout(config.resid_pdrop) def call(self, x, training=False): h = self.act(self.c_fc(x)) h2 = self.c_proj(h) h2 = self.dropout(h2, training=training) return h2 class TFBlock(tf.keras.layers.Layer): def __init__(self, config, scale=False, **kwargs): super().__init__(**kwargs) nx = config.n_embd inner_dim = config.n_inner if config.n_inner is not None else 4 * nx self.ln_1 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_1") self.attn = TFAttention(nx, config, scale, name="attn") self.ln_2 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_2") if config.add_cross_attention: self.crossattention = TFAttention(nx, config, scale, name="crossattention", is_cross_attention=True) self.ln_cross_attn = tf.keras.layers.LayerNormalization( epsilon=config.layer_norm_epsilon, name="ln_cross_attn" ) self.mlp = TFMLP(inner_dim, config, name="mlp") def call( self, x, layer_past, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, use_cache, output_attentions, training=False, ): a = self.ln_1(x) output_attn = self.attn( a, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=None, encoder_attention_mask=None, use_cache=use_cache, output_attentions=output_attentions, training=training, ) a = output_attn[0] # output_attn: a, present, (attentions) outputs = output_attn[1:] x = x + a # Cross-Attention Block if encoder_hidden_states is not None: # add one self-attention block for cross-attention if not hasattr(self, "crossattention"): raise ValueError( f"If `encoder_hidden_states` are passed, {self} has to be instantiated with " "cross-attention layers by setting `config.add_cross_attention=True`" ) ca = self.ln_cross_attn(x) output_cross_attn = self.crossattention( ca, layer_past=None, attention_mask=attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=False, output_attentions=output_attentions, training=training, ) ca = output_cross_attn[0] # output_attn: a, present, (cross_attentions) x = x + ca outputs = outputs + output_cross_attn[2:] # add cross attentions if we output attention weights m = self.ln_2(x) m = 
self.mlp(m, training=training) x = x + m outputs = [x] + outputs return outputs # x, present, (attentions, cross_attentions) @keras_serializable class TFGPT2MainLayer(tf.keras.layers.Layer): config_class = GPT2Config def __init__(self, config, *inputs, **kwargs): super().__init__(*inputs, **kwargs) self.config = config self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.use_cache = config.use_cache self.return_dict = config.use_return_dict self.num_hidden_layers = config.n_layer self.n_embd = config.n_embd self.n_positions = config.n_positions self.initializer_range = config.initializer_range self.wte = TFSharedEmbeddings( config.vocab_size, config.hidden_size, initializer_range=config.initializer_range, name="wte" ) self.drop = tf.keras.layers.Dropout(config.embd_pdrop) self.h = [TFBlock(config, scale=True, name=f"h_._{i}") for i in range(config.n_layer)] self.ln_f = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_f") def build(self, input_shape): with tf.name_scope("wpe"): self.wpe = self.add_weight( name="embeddings", shape=[self.n_positions, self.n_embd], initializer=get_initializer(self.initializer_range), ) super().build(input_shape) def get_input_embeddings(self): return self.wte def set_input_embeddings(self, value): self.wte.weight = value self.wte.vocab_size = shape_list(value)[0] def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} """ raise NotImplementedError @unpack_inputs def call( self, input_ids: Optional[TFModelInputType] = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, encoder_hidden_states: Optional[Union[np.ndarray, tf.Tensor]] = None, encoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = shape_list(input_ids) input_ids = tf.reshape(input_ids, [-1, input_shape[-1]]) elif inputs_embeds is not None: input_shape = shape_list(inputs_embeds)[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if past_key_values is None: past_length = 0 past_key_values = [None] * len(self.h) else: past_length = shape_list(past_key_values[0][0])[-2] if position_ids is None: position_ids = tf.expand_dims(tf.range(past_length, input_shape[-1] + past_length), axis=0) if attention_mask is not None: # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. 
attention_mask_shape = shape_list(attention_mask) attention_mask = tf.reshape(attention_mask, (attention_mask_shape[0], 1, 1, attention_mask_shape[1])) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. one_cst = tf.constant(1.0) attention_mask = tf.cast(attention_mask, dtype=one_cst.dtype) attention_mask = tf.multiply(tf.subtract(one_cst, attention_mask), tf.constant(-10000.0)) # Copied from `modeling_tf_t5.py` with -1e9 -> -10000 if self.config.add_cross_attention and encoder_attention_mask is not None: # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, mask_seq_length, mask_seq_length] # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] encoder_attention_mask = tf.cast(encoder_attention_mask, dtype=encoder_hidden_states.dtype) num_dims_encoder_attention_mask = len(shape_list(encoder_attention_mask)) if num_dims_encoder_attention_mask == 3: encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :] if num_dims_encoder_attention_mask == 2: encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow/transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = tf.math.equal(encoder_extended_attention_mask, # tf.transpose(encoder_extended_attention_mask, perm=(-1, -2))) encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0 else: encoder_extended_attention_mask = None encoder_attention_mask = encoder_extended_attention_mask # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] if head_mask is not None: raise NotImplementedError else: head_mask = [None] * self.num_hidden_layers # head_mask = tf.constant([0] * self.num_hidden_layers) position_ids = tf.reshape(position_ids, [-1, shape_list(position_ids)[-1]]) if inputs_embeds is None: # Note: tf.gather, on which the embedding layer is based, won't check positive out of bound # indices on GPU, returning zeros instead. This is a dangerous silent behavior. 
tf.debugging.assert_less( input_ids, tf.cast(self.config.vocab_size, dtype=input_ids.dtype), message=( "input_ids must be smaller than the embedding layer's input dimension (got" f" {tf.math.reduce_max(input_ids)} >= {self.config.vocab_size})" ), ) inputs_embeds = self.wte(input_ids, mode="embedding") position_embeds = tf.gather(self.wpe, position_ids) if token_type_ids is not None: token_type_ids = tf.reshape(token_type_ids, [-1, shape_list(token_type_ids)[-1]]) token_type_embeds = self.wte(token_type_ids, mode="embedding") else: token_type_embeds = tf.constant(0.0) position_embeds = tf.cast(position_embeds, dtype=inputs_embeds.dtype) token_type_embeds = tf.cast(token_type_embeds, dtype=inputs_embeds.dtype) hidden_states = inputs_embeds + position_embeds + token_type_embeds hidden_states = self.drop(hidden_states, training=training) output_shape = input_shape + [shape_list(hidden_states)[-1]] presents = () if use_cache else None all_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None all_hidden_states = () if output_hidden_states else None for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),) outputs = block( hidden_states, layer_past, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask, use_cache, output_attentions, training=training, ) hidden_states, present = outputs[:2] if use_cache: presents = presents + (present,) if output_attentions: all_attentions = all_attentions + (outputs[2],) if self.config.add_cross_attention and encoder_hidden_states is not None: all_cross_attentions = all_cross_attentions + (outputs[3],) hidden_states = self.ln_f(hidden_states) hidden_states = tf.reshape(hidden_states, output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if output_attentions: # let the number of heads free (-1) so we can extract attention even after head pruning attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:] all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions) if not return_dict: return tuple( v for v in [hidden_states, presents, all_hidden_states, all_attentions, all_cross_attentions] if v is not None ) return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) class TFGPT2PreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = GPT2Config base_model_prefix = "transformer" # names with a '.' represents the authorized unexpected/missing layers when a TF model is loaded from a PT model _keys_to_ignore_on_load_unexpected = [r"h.\d+.attn.bias", r"h.\d+.crossattention.bias"] @property def dummy_inputs(self): """ Dummy inputs to build the network. Returns: `Dict[str, tf.Tensor]`: The dummy inputs. 
""" dummy = {"input_ids": tf.constant(DUMMY_INPUTS, dtype=tf.int32)} # Add `encoder_hidden_states` to make the cross-attention layers' weights initialized if self.config.add_cross_attention: batch_size, seq_len = tf.constant(DUMMY_INPUTS).shape shape = (batch_size, seq_len) + (self.config.hidden_size,) h = tf.random.uniform(shape=shape) dummy["encoder_hidden_states"] = h return dummy @tf.function( input_signature=[ { "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), } ] ) def serving(self, inputs): output = self.call(inputs) return self.serving_output(output) @dataclass class TFGPT2DoubleHeadsModelOutput(ModelOutput): """ Base class for outputs of models predicting if two sentences are consecutive or not. Args: logits (`tf.Tensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). mc_logits (`tf.Tensor` of shape `(batch_size, num_choices)`): Prediction scores of the multiple choice classification head (scores for each choice before SoftMax). past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads, sequence_length, embed_size_per_head)`). Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see `past_key_values` input) to speed up sequential decoding. hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): Tuple of `tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ logits: tf.Tensor = None mc_logits: tf.Tensor = None past_key_values: Optional[List[tf.Tensor]] = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None GPT2_START_DOCSTRING = r""" This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. <Tip> TensorFlow models and layers in `transformers` accept two formats as input: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models and layers. 
Because of this support, when using methods like `model.fit()` things should "just work" for you - just pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with `input_ids` only and nothing else: `model(input_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: `model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Note that when creating models and layers with [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry about any of this, as you can just pass inputs like you would to any other Python function! </Tip> Parameters: config ([`GPT2Config`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ GPT2_INPUTS_DOCSTRING = r""" Args: input_ids (`Numpy array` or `tf.Tensor` of shape `(batch_size, input_ids_length)`): `input_ids_length` = `sequence_length` if `past_key_values` is `None` else `past_key_values[0].shape[-2]` (`sequence_length` of input past key value states). Indices of input sequence tokens in the vocabulary. If `past_key_values` is used, only input IDs that do not have their past calculated should be passed as `input_ids`. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and [`PreTrainedTokenizer.encode`] for details. [What are input IDs?](../glossary#input-ids) past_key_values (`List[tf.Tensor]` of length `config.n_layers`): Contains pre-computed hidden-states (key and values in the attention blocks) as computed by the model (see `past_key_values` output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as input ids as they have already been computed. attention_mask (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. If `past_key_values` is used, `attention_mask` needs to contain the masking strategy that was used for `past_key_values`. In other words, the `attention_mask` always has to have the length: `len(past_key_values) + len(input_ids)` [What are attention masks?](../glossary#attention-mask) token_type_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, sequence_length)`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. 
Selected in the range `[0, config.max_position_embeddings - 1]`. [What are position IDs?](../glossary#position-ids) head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (`bool`, *optional*, defaults to `False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.", GPT2_START_DOCSTRING, ) class TFGPT2Model(TFGPT2PreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFGPT2MainLayer(config, name="transformer") @unpack_inputs @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutputWithPastAndCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: Optional[TFModelInputType] = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, encoder_hidden_states: Optional[Union[np.ndarray, tf.Tensor]] = None, encoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFBaseModelOutputWithPastAndCrossAttentions, Tuple[tf.Tensor]]: r""" encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. 
This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past`). Set to `False` during training, `True` during generation """ outputs = self.transformer( input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) return outputs def serving_output(self, output): pkv = tf.convert_to_tensor(output.past_key_values) if self.config.use_cache else None hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None cross_attns = ( tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions and self.config.add_cross_attention and output.cross_attentions is not None else None ) return TFBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=output.last_hidden_state, past_key_values=pkv, hidden_states=hs, attentions=attns, cross_attentions=cross_attns, ) @add_start_docstrings( """ The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). 
""", GPT2_START_DOCSTRING, ) class TFGPT2LMHeadModel(TFGPT2PreTrainedModel, TFCausalLanguageModelingLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFGPT2MainLayer(config, name="transformer") def get_output_embeddings(self): return self.get_input_embeddings() def set_output_embeddings(self, value): self.set_input_embeddings(value) def prepare_inputs_for_generation(self, inputs, past_key_values=None, use_cache=None, **kwargs): token_type_ids = kwargs.get("token_type_ids", None) # only last token for inputs_ids if past is defined in kwargs if past_key_values: inputs = tf.expand_dims(inputs[:, -1], -1) if token_type_ids is not None: token_type_ids = tf.expand_dims(token_type_ids[:, -1], -1) position_ids = kwargs.get("position_ids", None) attention_mask = kwargs.get("attention_mask", None) if attention_mask is not None and position_ids is None: position_ids = tf.math.cumsum(attention_mask, axis=-1, exclusive=True) if past_key_values: position_ids = tf.expand_dims(position_ids[:, -1], -1) return { "input_ids": inputs, "attention_mask": attention_mask, "position_ids": position_ids, "past_key_values": past_key_values, "use_cache": use_cache, "token_type_ids": token_type_ids, } @unpack_inputs @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: Optional[TFModelInputType] = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, encoder_hidden_states: Optional[Union[np.ndarray, tf.Tensor]] = None, encoder_attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[Union[np.ndarray, tf.Tensor]] = None, training: Optional[bool] = False, ) -> Union[TFCausalLMOutputWithCrossAttentions, Tuple[tf.Tensor]]: r""" encoder_hidden_states (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. past_key_values (`Tuple[Tuple[tf.Tensor]]` of length `config.n_layers`) contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If `past` are used, the user can optionally input only the last `decoder_input_ids` (those that don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all `decoder_input_ids` of shape `(batch_size, sequence_length)`. 
use_cache (`bool`, *optional*, defaults to `True`): If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see `past`). Set to `False` during training, `True` during generation labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the cross entropy classification loss. Indices should be in `[0, ..., config.vocab_size - 1]`. """ transformer_outputs = self.transformer( input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) hidden_states = transformer_outputs[0] logits = self.transformer.wte(hidden_states, mode="linear") loss = None if labels is not None: # shift labels to the left and cut last logit token shifted_logits = logits[:, :-1] labels = labels[:, 1:] loss = self.hf_compute_loss(labels, shifted_logits) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFCausalLMOutputWithCrossAttentions( loss=loss, logits=logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, cross_attentions=transformer_outputs.cross_attentions, ) def serving_output(self, output): pkv = tf.convert_to_tensor(output.past_key_values) if self.config.use_cache else None hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None cross_attns = ( tf.convert_to_tensor(output.cross_attentions) if self.config.output_attentions and self.config.add_cross_attention and output.cross_attentions is not None else None ) return TFCausalLMOutputWithCrossAttentions( logits=output.logits, past_key_values=pkv, hidden_states=hs, attentions=attns, cross_attentions=cross_attns ) @add_start_docstrings( """ The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the input embeddings, the classification head takes as input the input of a specified classification token index in the input sequence). 
""", GPT2_START_DOCSTRING, ) class TFGPT2DoubleHeadsModel(TFGPT2PreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) config.num_labels = 1 self.transformer = TFGPT2MainLayer(config, name="transformer") self.multiple_choice_head = TFSequenceSummary( config, initializer_range=config.initializer_range, name="multiple_choice_head" ) @unpack_inputs @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFGPT2DoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids: Optional[TFModelInputType] = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, mc_token_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, training: Optional[bool] = False, ) -> Union[TFGPT2DoubleHeadsModelOutput, Tuple[tf.Tensor]]: r""" mc_token_ids (`tf.Tensor` or `Numpy array` of shape `(batch_size, num_choices)`, *optional*, default to index of the last token of the input): Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) - 1]`. Return: Examples: ```python >>> import tensorflow as tf >>> from transformers import AutoTokenizer, TFGPT2DoubleHeadsModel >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") >>> model = TFGPT2DoubleHeadsModel.from_pretrained("gpt2") >>> # Add a [CLS] to the vocabulary (we should train it also!) >>> num_added_tokens = tokenizer.add_special_tokens({"cls_token": "[CLS]"}) >>> embedding_layer = model.resize_token_embeddings( ... len(tokenizer) ... 
) # Update the model embeddings with the new vocabulary size >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"] >>> encoded_choices = [tokenizer.encode(s) for s in choices] >>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices] >>> input_ids = tf.constant(encoded_choices)[None, :] # Batch size: 1, number of choices: 2 >>> mc_token_ids = tf.constant([cls_token_location]) # Batch size: 1 >>> outputs = model(input_ids, mc_token_ids=mc_token_ids) >>> lm_prediction_scores, mc_prediction_scores = outputs[:2] ```""" if input_ids is not None: input_shapes = shape_list(input_ids) else: input_shapes = shape_list(inputs_embeds)[:-1] seq_length = input_shapes[-1] flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None transformer_outputs = self.transformer( input_ids=flat_input_ids, past_key_values=past_key_values, attention_mask=flat_attention_mask, token_type_ids=flat_token_type_ids, position_ids=flat_position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=None, encoder_attention_mask=None, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) hidden_states = transformer_outputs[0] hidden_states = tf.reshape(hidden_states, input_shapes + shape_list(hidden_states)[-1:]) lm_logits = self.transformer.wte(hidden_states, mode="linear") mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids, training=training) mc_logits = tf.squeeze(mc_logits, axis=-1) if not return_dict: return (lm_logits, mc_logits) + transformer_outputs[1:] return TFGPT2DoubleHeadsModelOutput( logits=lm_logits, mc_logits=mc_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @tf.function( input_signature=[ { "input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"), "attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"), "mc_token_ids": tf.TensorSpec((None, None), tf.int32, name="mc_token_ids"), } ] ) def serving(self, inputs): output = self.call(inputs) return self.serving_output(output) def serving_output(self, output): pkv = tf.convert_to_tensor(output.past_key_values) if self.config.use_cache else None hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFGPT2DoubleHeadsModelOutput( logits=output.logits, mc_logits=output.mc_logits, past_key_values=pkv, hidden_states=hs, attentions=attns, ) @add_start_docstrings( """ The GPT2 Model transformer with a sequence classification head on top (linear layer). [`TFGPT2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models (e.g. GPT-1) do. Since it does classification on the last token, it requires to know the position of the last token. If a `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. 
If no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in each row of the batch). """, GPT2_START_DOCSTRING, ) class TFGPT2ForSequenceClassification(TFGPT2PreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.score = tf.keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="score", use_bias=False, ) self.transformer = TFGPT2MainLayer(config, name="transformer") @unpack_inputs @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint="microsoft/DialogRPT-updown", output_type=TFSequenceClassifierOutputWithPast, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids: Optional[TFModelInputType] = None, past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None, attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, position_ids: Optional[Union[np.ndarray, tf.Tensor]] = None, head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None, inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[Union[np.ndarray, tf.Tensor]] = None, training: Optional[bool] = False, ) -> Union[TFSequenceClassifierOutputWithPast, Tuple[tf.Tensor]]: r""" labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the cross entropy classification loss. Indices should be in `[0, ..., config.vocab_size - 1]`. """ transformer_outputs = self.transformer( input_ids=input_ids, past_key_values=past_key_values, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) logits_shape = shape_list(logits) in_logits = None if self.config.pad_token_id is None: sequence_lengths = -1 else: if input_ids is not None: sequence_lengths = ( tf.reduce_sum( tf.cast( tf.math.not_equal(input_ids, self.config.pad_token_id), dtype=input_ids.dtype, ), -1, keepdims=False, ) - 1 ) in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1) else: sequence_lengths = -1 logger.warning( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be " "unexpected if using padding tokens in conjunction with `inputs_embeds.`" ) loss = None if labels is not None: assert ( self.config.pad_token_id is not None or logits_shape[0] == 1 ), "Cannot handle batch sizes > 1 if no padding token is defined." 
if not tf.is_tensor(sequence_lengths): in_logits = logits[0 : logits_shape[0], sequence_lengths] loss = self.hf_compute_loss(tf.reshape(labels, [-1]), tf.reshape(in_logits, [-1, self.num_labels])) pooled_logits = in_logits if in_logits is not None else logits if not return_dict: output = (pooled_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFSequenceClassifierOutputWithPast( loss=loss, logits=pooled_logits, past_key_values=transformer_outputs.past_key_values, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) def serving_output(self, output): pkv = tf.convert_to_tensor(output.past_key_values) if self.config.use_cache else None hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFSequenceClassifierOutputWithPast( logits=output.logits, past_key_values=pkv, hidden_states=hs, attentions=attns )
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing a torch tensor using `//` is no longer supported and has to be done via `torch.div`.
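For illustration only (not part of the dataset record): a minimal sketch of the replacement this PR description refers to, assuming a PyTorch version (>= 1.8) where `torch.div` accepts the `rounding_mode` argument; the tensor and variable names below are hypothetical.

```python
import torch

positions = torch.arange(10)  # hypothetical tensor of token positions
block_size = 4

# Old pattern that triggers the __floordiv__ deprecation warning:
# block_index = positions // block_size

# Warning-free equivalent: explicit floor division via torch.div
block_index = torch.div(positions, block_size, rounding_mode="floor")
print(block_index)  # tensor([0, 0, 0, 0, 1, 1, 1, 1, 2, 2])
```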
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/albert/__init__.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _import_structure = { "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_albert"] = ["AlbertTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_albert"] = [ "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "AlbertForMaskedLM", "AlbertForMultipleChoice", "AlbertForPreTraining", "AlbertForQuestionAnswering", "AlbertForSequenceClassification", "AlbertForTokenClassification", "AlbertModel", "AlbertPreTrainedModel", "load_tf_weights_in_albert", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tf_albert"] = [ "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFAlbertForMaskedLM", "TFAlbertForMultipleChoice", "TFAlbertForPreTraining", "TFAlbertForQuestionAnswering", "TFAlbertForSequenceClassification", "TFAlbertForTokenClassification", "TFAlbertMainLayer", "TFAlbertModel", "TFAlbertPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_flax_albert"] = [ "FlaxAlbertForMaskedLM", "FlaxAlbertForMultipleChoice", "FlaxAlbertForPreTraining", "FlaxAlbertForQuestionAnswering", "FlaxAlbertForSequenceClassification", "FlaxAlbertForTokenClassification", "FlaxAlbertModel", "FlaxAlbertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert import AlbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert_fast import AlbertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_albert import ( ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, AlbertPreTrainedModel, load_tf_weights_in_albert, ) try: if 
not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, TFAlbertModel, TFAlbertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, FlaxAlbertPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) _import_structure = { "configuration_albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig", "AlbertOnnxConfig"], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_albert"] = ["AlbertTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["tokenization_albert_fast"] = ["AlbertTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_albert"] = [ "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "AlbertForMaskedLM", "AlbertForMultipleChoice", "AlbertForPreTraining", "AlbertForQuestionAnswering", "AlbertForSequenceClassification", "AlbertForTokenClassification", "AlbertModel", "AlbertPreTrainedModel", "load_tf_weights_in_albert", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_tf_albert"] = [ "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFAlbertForMaskedLM", "TFAlbertForMultipleChoice", "TFAlbertForPreTraining", "TFAlbertForQuestionAnswering", "TFAlbertForSequenceClassification", "TFAlbertForTokenClassification", "TFAlbertMainLayer", "TFAlbertModel", "TFAlbertPreTrainedModel", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _import_structure["modeling_flax_albert"] = [ "FlaxAlbertForMaskedLM", "FlaxAlbertForMultipleChoice", "FlaxAlbertForPreTraining", "FlaxAlbertForQuestionAnswering", "FlaxAlbertForSequenceClassification", "FlaxAlbertForTokenClassification", "FlaxAlbertModel", "FlaxAlbertPreTrainedModel", ] if TYPE_CHECKING: from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert import AlbertTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_albert_fast import AlbertTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_albert import ( ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, AlbertPreTrainedModel, load_tf_weights_in_albert, ) try: if 
not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, TFAlbertModel, TFAlbertPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_albert import ( FlaxAlbertForMaskedLM, FlaxAlbertForMultipleChoice, FlaxAlbertForPreTraining, FlaxAlbertForQuestionAnswering, FlaxAlbertForSequenceClassification, FlaxAlbertForTokenClassification, FlaxAlbertModel, FlaxAlbertPreTrainedModel, ) else: import sys sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div` (see the sketch after this record).
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./examples/research_projects/rag/lightning_base.py
import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version logger = logging.getLogger(__name__) require_version("pytorch_lightning>=1.0.4") MODEL_MODES = { "base": AutoModel, "sequence-classification": AutoModelForSequenceClassification, "question-answering": AutoModelForQuestionAnswering, "pretraining": AutoModelForPreTraining, "token-classification": AutoModelForTokenClassification, "language-modeling": AutoModelWithLMHead, "summarization": AutoModelForSeq2SeqLM, "translation": AutoModelForSeq2SeqLM, } # update this and the import above to support new schedulers from transformers.optimization arg_to_scheduler = { "linear": get_linear_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, "polynomial": get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } arg_to_scheduler_choices = sorted(arg_to_scheduler.keys()) arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}" class BaseTransformer(pl.LightningModule): def __init__( self, hparams: argparse.Namespace, num_labels=None, mode="base", config=None, tokenizer=None, model=None, **config_kwargs, ): """Initialize a model, tokenizer and config.""" super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(hparams) self.step_count = 0 self.output_dir = Path(self.hparams.output_dir) cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: self.config = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, **({"num_labels": num_labels} if num_labels is not None else {}), cache_dir=cache_dir, **config_kwargs, ) else: self.config: PretrainedConfig = config extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: if getattr(self.hparams, p, None): assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute" setattr(self.config, p, getattr(self.hparams, p)) if tokenizer is None: self.tokenizer = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, cache_dir=cache_dir, ) else: self.tokenizer: PreTrainedTokenizer = tokenizer self.model_type = MODEL_MODES[mode] if model is None: self.model = self.model_type.from_pretrained( self.hparams.model_name_or_path, from_tf=bool(".ckpt" in self.hparams.model_name_or_path), config=self.config, cache_dir=cache_dir, ) else: self.model = model def load_hf_checkpoint(self, *args, **kwargs): self.model = self.model_type.from_pretrained(*args, **kwargs) def 
get_lr_scheduler(self): get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler] scheduler = get_schedule_func( self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps() ) scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1} return scheduler def configure_optimizers(self): """Prepare optimizer and schedule (linear warmup and decay)""" model = self.model no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": self.hparams.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] if self.hparams.adafactor: optimizer = Adafactor( optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False ) else: optimizer = AdamW( optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon ) self.opt = optimizer scheduler = self.get_lr_scheduler() return [optimizer], [scheduler] def test_step(self, batch, batch_nb): return self.validation_step(batch, batch_nb) def test_epoch_end(self, outputs): return self.validation_end(outputs) def total_steps(self) -> int: """The number of total training steps that will be run. Used for lr scheduler purposes.""" num_devices = max(1, self.hparams.gpus) # TODO: consider num_tpu_cores effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def setup(self, stage): if stage == "test": self.dataset_size = len(self.test_dataloader().dataset) else: self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True) self.dataset_size = len(self.train_dataloader().dataset) def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False): raise NotImplementedError("You must implement this for your task") def train_dataloader(self): return self.train_loader def val_dataloader(self): return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False) def test_dataloader(self): return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False) def _feature_file(self, mode): return os.path.join( self.hparams.data_dir, "cached_{}_{}_{}".format( mode, list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(), str(self.hparams.max_seq_length), ), ) @pl.utilities.rank_zero_only def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None: save_path = self.output_dir.joinpath("best_tfmr") self.model.config.save_step = self.step_count self.model.save_pretrained(save_path) self.tokenizer.save_pretrained(save_path) @staticmethod def add_model_specific_args(parser, root_dir): parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co", ) parser.add_argument( "--encoder_layerdrop", type=float, help="Encoder layer dropout probability (Optional). 
Goes into model.config", ) parser.add_argument( "--decoder_layerdrop", type=float, help="Decoder layer dropout probability (Optional). Goes into model.config", ) parser.add_argument( "--dropout", type=float, help="Dropout probability (Optional). Goes into model.config", ) parser.add_argument( "--attention_dropout", type=float, help="Attention dropout probability (Optional). Goes into model.config", ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument( "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler", ) parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader") parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int) parser.add_argument("--train_batch_size", default=32, type=int) parser.add_argument("--eval_batch_size", default=32, type=int) parser.add_argument("--adafactor", action="store_true") class InitCallback(pl.Callback): # This method is better that using a custom DDP plugging with the latest pytorch-lightning (@shamanez) def on_sanity_check_start(self, trainer, pl_module): if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. class LoggingCallback(pl.Callback): def on_batch_end(self, trainer, pl_module): lr_scheduler = trainer.lr_schedulers[0]["scheduler"] lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())} pl_module.logger.log_metrics(lrs) def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): rank_zero_info("***** Validation results *****") metrics = trainer.callback_metrics # Log results for key in sorted(metrics): if key not in ["log", "progress_bar"]: rank_zero_info("{} = {}\n".format(key, str(metrics[key]))) def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): rank_zero_info("***** Test results *****") metrics = trainer.callback_metrics # Log and save results to file output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt") with open(output_test_results_file, "w") as writer: for key in sorted(metrics): if key not in ["log", "progress_bar"]: rank_zero_info("{} = {}\n".format(key, str(metrics[key]))) writer.write("{} = {}\n".format(key, str(metrics[key]))) def add_generic_args(parser, root_dir) -> None: # To allow all pl args uncomment the following line # parser = pl.Trainer.add_argparse_args(parser) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O2", help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." 
"See details at https://nvidia.github.io/apex/amp.html" ), ) parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int) parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm") parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.") parser.add_argument( "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument( "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.", ) def generic_train( model: BaseTransformer, args: argparse.Namespace, early_stopping_callback=None, logger=True, # can pass WandbLogger() here custom_ddp_plugin=None, extra_callbacks=[], checkpoint_callback=None, logging_callback=None, **extra_train_kwargs, ): pl.seed_everything(args.seed) # init model odir = Path(model.hparams.output_dir) odir.mkdir(exist_ok=True) # add custom checkpoints if checkpoint_callback is None: checkpoint_callback = pl.callbacks.ModelCheckpoint( filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(early_stopping_callback) if logging_callback is None: logging_callback = LoggingCallback() train_params = {} # TODO: remove with PyTorch 1.6 since pl uses native amp if args.fp16: train_params["precision"] = 16 # train_params["amp_level"] = args.fp16_opt_level if args.gpus > 1: train_params["accelerator"] = "auto" # "ddp" train_params["strategy"] = "ddp" train_params["accumulate_grad_batches"] = args.accumulate_grad_batches train_params["profiler"] = None # extra_train_kwargs.get("profiler", None) #get unwanted logs train_params["devices"] = "auto" trainer = pl.Trainer.from_argparse_args( args, weights_summary=None, callbacks=[logging_callback] + extra_callbacks + [checkpoint_callback] + [InitCallback()], # plugins=[custom_ddp_plugin], logger=logger, **train_params, ) if args.do_train: trainer.fit(model) return trainer
import argparse import logging import os from pathlib import Path from typing import Any, Dict import pytorch_lightning as pl from pytorch_lightning.utilities import rank_zero_info from transformers import ( AdamW, AutoConfig, AutoModel, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForTokenClassification, AutoModelWithLMHead, AutoTokenizer, PretrainedConfig, PreTrainedTokenizer, ) from transformers.optimization import ( Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from transformers.utils.versions import require_version logger = logging.getLogger(__name__) require_version("pytorch_lightning>=1.0.4") MODEL_MODES = { "base": AutoModel, "sequence-classification": AutoModelForSequenceClassification, "question-answering": AutoModelForQuestionAnswering, "pretraining": AutoModelForPreTraining, "token-classification": AutoModelForTokenClassification, "language-modeling": AutoModelWithLMHead, "summarization": AutoModelForSeq2SeqLM, "translation": AutoModelForSeq2SeqLM, } # update this and the import above to support new schedulers from transformers.optimization arg_to_scheduler = { "linear": get_linear_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, "polynomial": get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } arg_to_scheduler_choices = sorted(arg_to_scheduler.keys()) arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}" class BaseTransformer(pl.LightningModule): def __init__( self, hparams: argparse.Namespace, num_labels=None, mode="base", config=None, tokenizer=None, model=None, **config_kwargs, ): """Initialize a model, tokenizer and config.""" super().__init__() # TODO: move to self.save_hyperparameters() # self.save_hyperparameters() # can also expand arguments into trainer signature for easier reading self.save_hyperparameters(hparams) self.step_count = 0 self.output_dir = Path(self.hparams.output_dir) cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: self.config = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, **({"num_labels": num_labels} if num_labels is not None else {}), cache_dir=cache_dir, **config_kwargs, ) else: self.config: PretrainedConfig = config extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: if getattr(self.hparams, p, None): assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute" setattr(self.config, p, getattr(self.hparams, p)) if tokenizer is None: self.tokenizer = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, cache_dir=cache_dir, ) else: self.tokenizer: PreTrainedTokenizer = tokenizer self.model_type = MODEL_MODES[mode] if model is None: self.model = self.model_type.from_pretrained( self.hparams.model_name_or_path, from_tf=bool(".ckpt" in self.hparams.model_name_or_path), config=self.config, cache_dir=cache_dir, ) else: self.model = model def load_hf_checkpoint(self, *args, **kwargs): self.model = self.model_type.from_pretrained(*args, **kwargs) def 
get_lr_scheduler(self): get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler] scheduler = get_schedule_func( self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps() ) scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1} return scheduler def configure_optimizers(self): """Prepare optimizer and schedule (linear warmup and decay)""" model = self.model no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": self.hparams.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] if self.hparams.adafactor: optimizer = Adafactor( optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False ) else: optimizer = AdamW( optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon ) self.opt = optimizer scheduler = self.get_lr_scheduler() return [optimizer], [scheduler] def test_step(self, batch, batch_nb): return self.validation_step(batch, batch_nb) def test_epoch_end(self, outputs): return self.validation_end(outputs) def total_steps(self) -> int: """The number of total training steps that will be run. Used for lr scheduler purposes.""" num_devices = max(1, self.hparams.gpus) # TODO: consider num_tpu_cores effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs def setup(self, stage): if stage == "test": self.dataset_size = len(self.test_dataloader().dataset) else: self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True) self.dataset_size = len(self.train_dataloader().dataset) def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False): raise NotImplementedError("You must implement this for your task") def train_dataloader(self): return self.train_loader def val_dataloader(self): return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False) def test_dataloader(self): return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False) def _feature_file(self, mode): return os.path.join( self.hparams.data_dir, "cached_{}_{}_{}".format( mode, list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(), str(self.hparams.max_seq_length), ), ) @pl.utilities.rank_zero_only def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None: save_path = self.output_dir.joinpath("best_tfmr") self.model.config.save_step = self.step_count self.model.save_pretrained(save_path) self.tokenizer.save_pretrained(save_path) @staticmethod def add_model_specific_args(parser, root_dir): parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co", ) parser.add_argument( "--encoder_layerdrop", type=float, help="Encoder layer dropout probability (Optional). 
Goes into model.config", ) parser.add_argument( "--decoder_layerdrop", type=float, help="Decoder layer dropout probability (Optional). Goes into model.config", ) parser.add_argument( "--dropout", type=float, help="Dropout probability (Optional). Goes into model.config", ) parser.add_argument( "--attention_dropout", type=float, help="Attention dropout probability (Optional). Goes into model.config", ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument( "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler", ) parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader") parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int) parser.add_argument("--train_batch_size", default=32, type=int) parser.add_argument("--eval_batch_size", default=32, type=int) parser.add_argument("--adafactor", action="store_true") class InitCallback(pl.Callback): # This method is better that using a custom DDP plugging with the latest pytorch-lightning (@shamanez) def on_sanity_check_start(self, trainer, pl_module): if ( trainer.is_global_zero and trainer.global_rank == 0 ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed. pl_module.model.rag.retriever.init_retrieval() # better to use hook functions. class LoggingCallback(pl.Callback): def on_batch_end(self, trainer, pl_module): lr_scheduler = trainer.lr_schedulers[0]["scheduler"] lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())} pl_module.logger.log_metrics(lrs) def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): rank_zero_info("***** Validation results *****") metrics = trainer.callback_metrics # Log results for key in sorted(metrics): if key not in ["log", "progress_bar"]: rank_zero_info("{} = {}\n".format(key, str(metrics[key]))) def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): rank_zero_info("***** Test results *****") metrics = trainer.callback_metrics # Log and save results to file output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt") with open(output_test_results_file, "w") as writer: for key in sorted(metrics): if key not in ["log", "progress_bar"]: rank_zero_info("{} = {}\n".format(key, str(metrics[key]))) writer.write("{} = {}\n".format(key, str(metrics[key]))) def add_generic_args(parser, root_dir) -> None: # To allow all pl args uncomment the following line # parser = pl.Trainer.add_argparse_args(parser) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O2", help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." 
"See details at https://nvidia.github.io/apex/amp.html" ), ) parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int) parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm") parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.") parser.add_argument( "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument( "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.", ) def generic_train( model: BaseTransformer, args: argparse.Namespace, early_stopping_callback=None, logger=True, # can pass WandbLogger() here custom_ddp_plugin=None, extra_callbacks=[], checkpoint_callback=None, logging_callback=None, **extra_train_kwargs, ): pl.seed_everything(args.seed) # init model odir = Path(model.hparams.output_dir) odir.mkdir(exist_ok=True) # add custom checkpoints if checkpoint_callback is None: checkpoint_callback = pl.callbacks.ModelCheckpoint( filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1 ) if early_stopping_callback: extra_callbacks.append(early_stopping_callback) if logging_callback is None: logging_callback = LoggingCallback() train_params = {} # TODO: remove with PyTorch 1.6 since pl uses native amp if args.fp16: train_params["precision"] = 16 # train_params["amp_level"] = args.fp16_opt_level if args.gpus > 1: train_params["accelerator"] = "auto" # "ddp" train_params["strategy"] = "ddp" train_params["accumulate_grad_batches"] = args.accumulate_grad_batches train_params["profiler"] = None # extra_train_kwargs.get("profiler", None) #get unwanted logs train_params["devices"] = "auto" trainer = pl.Trainer.from_argparse_args( args, weights_summary=None, callbacks=[logging_callback] + extra_callbacks + [checkpoint_callback] + [InitCallback()], # plugins=[custom_ddp_plugin], logger=logger, **train_params, ) if args.do_train: trainer.fit(model) return trainer
-1
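The recurring PR description above boils down to one mechanical substitution: floor division on `torch.Tensor` operands should go through `torch.div` with an explicit `rounding_mode` rather than the `//` operator. Below is a minimal, hedged sketch of that substitution; it is not taken from the PR diff, and the tensor names (`idx`, `num_cols`) are illustrative only. Note that the deprecated `__floordiv__` rounded toward zero, so `rounding_mode="trunc"` preserves the legacy behavior exactly, while `rounding_mode="floor"` gives true floor division; the two agree for non-negative values.

```python
import torch

# Illustrative example: recover row/column positions from flat indices.
idx = torch.arange(12)
num_cols = 4

# Deprecated form: emits "__floordiv__ is deprecated ..." on affected PyTorch versions.
# rows = idx // num_cols

# Warning-free form: explicit floor division via torch.div.
rows = torch.div(idx, num_cols, rounding_mode="floor")
cols = idx % num_cols

print(rows.tolist())  # [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2]
print(cols.tolist())  # [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]
```

For the non-negative position and index tensors typically involved in modeling code, both rounding modes produce identical results, which is presumably why the warning can be removed without changing model outputs.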
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/trocr/configuration_trocr.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TrOCR model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = { "microsoft/trocr-base-handwritten": ( "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json" ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class TrOCRConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`TrOCRForCausalLM`]. It is used to instantiate an TrOCR model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the TrOCR [microsoft/trocr-base-handwritten](https://huggingface.co/microsoft/trocr-base-handwritten) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50265): Vocabulary size of the TrOCR model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`TrOCRForCausalLM`]. d_model (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. 
use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). scale_embedding (`bool`, *optional*, defaults to `False`): Whether or not to scale the word embeddings by sqrt(d_model). use_learned_position_embeddings (`bool`, *optional*, defaults to `True`): Whether or not to use learned position embeddings. If not, sinusoidal position embeddings will be used. layernorm_embedding (`bool`, *optional*, defaults to `True`): Whether or not to use a layernorm after the word + position embeddings. Example: ```python >>> from transformers import TrOCRConfig, TrOCRForCausalLM >>> # Initializing a TrOCR-base style configuration >>> configuration = TrOCRConfig() >>> # Initializing a model (with random weights) from the TrOCR-base style configuration >>> model = TrOCRForCausalLM(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "trocr" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model", "num_hidden_layers": "decoder_layers", } def __init__( self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ): self.vocab_size = vocab_size self.d_model = d_model self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.activation_function = activation_function self.max_position_embeddings = max_position_embeddings self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.init_std = init_std self.decoder_layerdrop = decoder_layerdrop self.use_cache = use_cache self.scale_embedding = scale_embedding self.use_learned_position_embeddings = use_learned_position_embeddings self.layernorm_embedding = layernorm_embedding super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TrOCR model configuration""" from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = { "microsoft/trocr-base-handwritten": ( "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json" ), # See all TrOCR models at https://huggingface.co/models?filter=trocr } class TrOCRConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`TrOCRForCausalLM`]. It is used to instantiate an TrOCR model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the TrOCR [microsoft/trocr-base-handwritten](https://huggingface.co/microsoft/trocr-base-handwritten) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 50265): Vocabulary size of the TrOCR model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`TrOCRForCausalLM`]. d_model (`int`, *optional*, defaults to 1024): Dimensionality of the layers and the pooler layer. decoder_layers (`int`, *optional*, defaults to 12): Number of decoder layers. decoder_attention_heads (`int`, *optional*, defaults to 16): Number of attention heads for each attention layer in the Transformer decoder. decoder_ffn_dim (`int`, *optional*, defaults to 4096): Dimensionality of the "intermediate" (often named feed-forward) layer in decoder. activation_function (`str` or `function`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). dropout (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, and pooler. attention_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for the attention probabilities. activation_dropout (`float`, *optional*, defaults to 0.0): The dropout ratio for activations inside the fully connected layer. init_std (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. decoder_layerdrop (`float`, *optional*, defaults to 0.0): The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://arxiv.org/abs/1909.11556) for more details. 
use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). scale_embedding (`bool`, *optional*, defaults to `False`): Whether or not to scale the word embeddings by sqrt(d_model). use_learned_position_embeddings (`bool`, *optional*, defaults to `True`): Whether or not to use learned position embeddings. If not, sinusoidal position embeddings will be used. layernorm_embedding (`bool`, *optional*, defaults to `True`): Whether or not to use a layernorm after the word + position embeddings. Example: ```python >>> from transformers import TrOCRConfig, TrOCRForCausalLM >>> # Initializing a TrOCR-base style configuration >>> configuration = TrOCRConfig() >>> # Initializing a model (with random weights) from the TrOCR-base style configuration >>> model = TrOCRForCausalLM(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "trocr" keys_to_ignore_at_inference = ["past_key_values"] attribute_map = { "num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model", "num_hidden_layers": "decoder_layers", } def __init__( self, vocab_size=50265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ): self.vocab_size = vocab_size self.d_model = d_model self.decoder_layers = decoder_layers self.decoder_attention_heads = decoder_attention_heads self.decoder_ffn_dim = decoder_ffn_dim self.activation_function = activation_function self.max_position_embeddings = max_position_embeddings self.dropout = dropout self.attention_dropout = attention_dropout self.activation_dropout = activation_dropout self.init_std = init_std self.decoder_layerdrop = decoder_layerdrop self.use_cache = use_cache self.scale_embedding = scale_embedding self.use_learned_position_embeddings = use_learned_position_embeddings self.layernorm_embedding = layernorm_embedding super().__init__( pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./examples/research_projects/jax-projects/big_bird/train.py
import os from dataclasses import replace import jax import wandb from bigbird_flax import Args, DataCollator, FlaxBigBirdForNaturalQuestions, Trainer, build_tx, train_step, val_step from datasets import load_dataset from flax import jax_utils from transformers import BigBirdTokenizerFast if __name__ == "__main__": print("#################### AVAILABLE DEVICES ####################") print(jax.devices()) print("###########################################################") # setup for wandb sweep args = Args() logger = wandb.init(project="bigbird-natural-questions", config=args.__dict__) wandb_args = dict(logger.config) del wandb_args["batch_size"] args = replace(args, **wandb_args) base_dir = args.base_dir + "-" + wandb.run.id args = replace(args, base_dir=base_dir) print(args) tr_dataset = load_dataset("json", data_files=args.tr_data_path)["train"] val_dataset = load_dataset("json", data_files=args.val_data_path)["train"] # drop extra batch for now indices = range(len(tr_dataset) - len(tr_dataset) % args.batch_size) tr_dataset = tr_dataset.shuffle().select(indices) indices = range(len(val_dataset) - len(val_dataset) % args.batch_size) val_dataset = val_dataset.shuffle().select(indices) if os.environ.get("TRAIN_ON_SMALL", "false") == "true": tr_dataset = tr_dataset.shuffle().select(range(80000)) val_dataset = val_dataset.shuffle().select(range(8000)) print(tr_dataset) print(val_dataset) model = FlaxBigBirdForNaturalQuestions.from_pretrained( args.model_id, block_size=args.block_size, num_random_blocks=args.num_random_blocks ) tokenizer = BigBirdTokenizerFast.from_pretrained(args.model_id) data_collator = DataCollator(pad_id=tokenizer.pad_token_id, max_length=4096) tx_args = { "lr": args.lr, "init_lr": args.init_lr, "warmup_steps": args.warmup_steps, "num_train_steps": args.max_epochs * (len(tr_dataset) // args.batch_size), "weight_decay": args.weight_decay, } tx, lr = build_tx(**tx_args) trainer = Trainer( args=args, data_collator=data_collator, model_save_fn=model.save_pretrained, train_step_fn=train_step, val_step_fn=val_step, logger=logger, scheduler_fn=lr, ) ckpt_dir = None state = trainer.create_state(model, tx, num_train_steps=tx_args["num_train_steps"], ckpt_dir=ckpt_dir) try: trainer.train(state, tr_dataset, val_dataset) except KeyboardInterrupt: print("Oooops; TRAINING STOPPED UNFORTUNATELY") print("SAVING WEIGHTS IN `final-weights`") params = jax_utils.unreplicate(state.params) model.save_pretrained(os.path.join(args.base_dir, "final-weights"), params=params)
import os from dataclasses import replace import jax import wandb from bigbird_flax import Args, DataCollator, FlaxBigBirdForNaturalQuestions, Trainer, build_tx, train_step, val_step from datasets import load_dataset from flax import jax_utils from transformers import BigBirdTokenizerFast if __name__ == "__main__": print("#################### AVAILABLE DEVICES ####################") print(jax.devices()) print("###########################################################") # setup for wandb sweep args = Args() logger = wandb.init(project="bigbird-natural-questions", config=args.__dict__) wandb_args = dict(logger.config) del wandb_args["batch_size"] args = replace(args, **wandb_args) base_dir = args.base_dir + "-" + wandb.run.id args = replace(args, base_dir=base_dir) print(args) tr_dataset = load_dataset("json", data_files=args.tr_data_path)["train"] val_dataset = load_dataset("json", data_files=args.val_data_path)["train"] # drop extra batch for now indices = range(len(tr_dataset) - len(tr_dataset) % args.batch_size) tr_dataset = tr_dataset.shuffle().select(indices) indices = range(len(val_dataset) - len(val_dataset) % args.batch_size) val_dataset = val_dataset.shuffle().select(indices) if os.environ.get("TRAIN_ON_SMALL", "false") == "true": tr_dataset = tr_dataset.shuffle().select(range(80000)) val_dataset = val_dataset.shuffle().select(range(8000)) print(tr_dataset) print(val_dataset) model = FlaxBigBirdForNaturalQuestions.from_pretrained( args.model_id, block_size=args.block_size, num_random_blocks=args.num_random_blocks ) tokenizer = BigBirdTokenizerFast.from_pretrained(args.model_id) data_collator = DataCollator(pad_id=tokenizer.pad_token_id, max_length=4096) tx_args = { "lr": args.lr, "init_lr": args.init_lr, "warmup_steps": args.warmup_steps, "num_train_steps": args.max_epochs * (len(tr_dataset) // args.batch_size), "weight_decay": args.weight_decay, } tx, lr = build_tx(**tx_args) trainer = Trainer( args=args, data_collator=data_collator, model_save_fn=model.save_pretrained, train_step_fn=train_step, val_step_fn=val_step, logger=logger, scheduler_fn=lr, ) ckpt_dir = None state = trainer.create_state(model, tx, num_train_steps=tx_args["num_train_steps"], ckpt_dir=ckpt_dir) try: trainer.train(state, tr_dataset, val_dataset) except KeyboardInterrupt: print("Oooops; TRAINING STOPPED UNFORTUNATELY") print("SAVING WEIGHTS IN `final-weights`") params = jax_utils.unreplicate(state.params) model.save_pretrained(os.path.join(args.base_dir, "final-weights"), params=params)
-1
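One scope note, offered as a hedged aside: the deprecation only concerns `torch.Tensor.__floordiv__`. Plain Python integer arithmetic such as `len(tr_dataset) // args.batch_size` in the training script above never goes through PyTorch and emits no warning, so only tensor-valued divisions need the `torch.div` rewrite. A tiny illustrative check, with made-up names:

```python
import torch

n_examples, batch_size = 80_000, 8

# Python ints: ordinary floor division, untouched by the PyTorch deprecation.
steps_per_epoch = n_examples // batch_size

# Tensor operands are the ones that used __floordiv__; torch.div is the warning-free form.
steps_tensor = torch.div(torch.tensor(n_examples), batch_size, rounding_mode="floor")

assert steps_per_epoch == steps_tensor.item() == 10_000
```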
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/swin/test_modeling_tf_swin.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the TF 2.0 Swin model. """ import inspect import unittest import numpy as np from transformers import SwinConfig from transformers.testing_utils import require_tf, require_vision, slow, to_2tuple, tooslow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.swin.modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, ) if is_vision_available(): from PIL import Image from transformers import AutoFeatureExtractor class TFSwinModelTester: def __init__( self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, ) -> None: self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.depths = depths self.num_heads = num_heads self.window_size = window_size self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.use_absolute_embeddings = use_absolute_embeddings self.patch_norm = patch_norm self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.is_training = is_training self.scope = scope self.use_labels = use_labels self.type_sequence_label_size = type_sequence_label_size self.encoder_stride = encoder_stride def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return SwinConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, 
use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, ) def create_and_check_model(self, config, pixel_values, labels): model = TFSwinModel(config=config) result = model(pixel_values) expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim)) def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels): model = TFSwinForMaskedImageModeling(config=config) result = model(pixel_values) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images config.num_channels = 1 model = TFSwinForMaskedImageModeling(config) pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = TFSwinForImageClassification(config) result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) # test greyscale images config.num_channels = 1 model = TFSwinForImageClassification(config) pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFSwinModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFSwinModel, TFSwinForImageClassification, TFSwinForMaskedImageModeling, ) if is_tf_available() else () ) pipeline_model_mapping = ( {"feature-extraction": TFSwinModel, "image-classification": TFSwinForImageClassification} if is_tf_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFSwinModelTester(self) self.config_tester = ConfigTester(self, config_class=SwinConfig, embed_dim=37) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_image_modeling(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @unittest.skip(reason="Swin does not use inputs_embeds") def test_inputs_embeds(self): pass @tooslow def test_saved_model_creation(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), tf.keras.layers.Layer) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions expected_num_attentions = len(self.model_tester.depths) self.assertEqual(len(attentions), expected_num_attentions) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True window_size_squared = config.window_size**2 model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), expected_num_attentions) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) def check_hidden_states_output(self, inputs_dict, config, model_class, image_size): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) # Swin has a different seq_length patch_size = to_2tuple(config.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], ) reshaped_hidden_states = outputs.reshaped_hidden_states 
self.assertEqual(len(reshaped_hidden_states), expected_num_layers) batch_size, num_channels, height, width = reshaped_hidden_states[0].shape reshaped_hidden_states = tf.reshape(reshaped_hidden_states[0], (batch_size, num_channels, height * width)) reshaped_hidden_states = tf.transpose(reshaped_hidden_states, (0, 2, 1)) self.assertListEqual( list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim], ) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() image_size = to_2tuple(self.model_tester.image_size) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) def test_inputs_requiring_padding(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.patch_size = 3 image_size = to_2tuple(self.model_tester.image_size) patch_size = to_2tuple(config.patch_size) padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) @slow def test_model_from_pretrained(self): for model_name in TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFSwinModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_vision @require_tf class TFSwinModelIntegrationTest(unittest.TestCase): @cached_property def default_feature_extractor(self): return ( AutoFeatureExtractor.from_pretrained("microsoft/swin-tiny-patch4-window7-224") if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = TFSwinForImageClassification.from_pretrained("microsoft/swin-tiny-patch4-window7-224") feature_extractor = self.default_feature_extractor image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = feature_extractor(images=image, return_tensors="tf") # forward pass outputs = model(inputs) # verify the logits expected_shape = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = tf.constant([-0.0948, -0.6454, -0.0921]) self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the TF 2.0 Swin model. """ import inspect import unittest import numpy as np from transformers import SwinConfig from transformers.testing_utils import require_tf, require_vision, slow, to_2tuple, tooslow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers.models.swin.modeling_tf_swin import ( TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST, TFSwinForImageClassification, TFSwinForMaskedImageModeling, TFSwinModel, ) if is_vision_available(): from PIL import Image from transformers import AutoFeatureExtractor class TFSwinModelTester: def __init__( self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, ) -> None: self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.embed_dim = embed_dim self.depths = depths self.num_heads = num_heads self.window_size = window_size self.mlp_ratio = mlp_ratio self.qkv_bias = qkv_bias self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.drop_path_rate = drop_path_rate self.hidden_act = hidden_act self.use_absolute_embeddings = use_absolute_embeddings self.patch_norm = patch_norm self.layer_norm_eps = layer_norm_eps self.initializer_range = initializer_range self.is_training = is_training self.scope = scope self.use_labels = use_labels self.type_sequence_label_size = type_sequence_label_size self.encoder_stride = encoder_stride def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return SwinConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, embed_dim=self.embed_dim, depths=self.depths, num_heads=self.num_heads, window_size=self.window_size, mlp_ratio=self.mlp_ratio, qkv_bias=self.qkv_bias, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, drop_path_rate=self.drop_path_rate, hidden_act=self.hidden_act, 
use_absolute_embeddings=self.use_absolute_embeddings, path_norm=self.patch_norm, layer_norm_eps=self.layer_norm_eps, initializer_range=self.initializer_range, encoder_stride=self.encoder_stride, ) def create_and_check_model(self, config, pixel_values, labels): model = TFSwinModel(config=config) result = model(pixel_values) expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim)) def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels): model = TFSwinForMaskedImageModeling(config=config) result = model(pixel_values) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images config.num_channels = 1 model = TFSwinForMaskedImageModeling(config) pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size)) def create_and_check_for_image_classification(self, config, pixel_values, labels): config.num_labels = self.type_sequence_label_size model = TFSwinForImageClassification(config) result = model(pixel_values, labels=labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) # test greyscale images config.num_channels = 1 model = TFSwinForImageClassification(config) pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_tf class TFSwinModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( TFSwinModel, TFSwinForImageClassification, TFSwinForMaskedImageModeling, ) if is_tf_available() else () ) pipeline_model_mapping = ( {"feature-extraction": TFSwinModel, "image-classification": TFSwinForImageClassification} if is_tf_available() else {} ) test_pruning = False test_resize_embeddings = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFSwinModelTester(self) self.config_tester = ConfigTester(self, config_class=SwinConfig, embed_dim=37) def test_config(self): self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def create_and_test_config_common_properties(self): return def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_image_modeling(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs) def test_for_image_classification(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*config_and_inputs) @unittest.skip(reason="Swin does not use inputs_embeds") def test_inputs_embeds(self): pass @tooslow def test_saved_model_creation(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), tf.keras.layers.Layer) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions expected_num_attentions = len(self.model_tester.depths) self.assertEqual(len(attentions), expected_num_attentions) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True window_size_squared = config.window_size**2 model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) if hasattr(self.model_tester, "num_hidden_states_types"): added_hidden_states = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), expected_num_attentions) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_heads[0], window_size_squared, window_size_squared], ) def check_hidden_states_output(self, inputs_dict, config, model_class, image_size): model = model_class(config) outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) # Swin has a different seq_length patch_size = to_2tuple(config.patch_size) num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]), [num_patches, self.model_tester.embed_dim], ) reshaped_hidden_states = outputs.reshaped_hidden_states 
self.assertEqual(len(reshaped_hidden_states), expected_num_layers) batch_size, num_channels, height, width = reshaped_hidden_states[0].shape reshaped_hidden_states = tf.reshape(reshaped_hidden_states[0], (batch_size, num_channels, height * width)) reshaped_hidden_states = tf.transpose(reshaped_hidden_states, (0, 2, 1)) self.assertListEqual( list(reshaped_hidden_states.shape[-2:]), [num_patches, self.model_tester.embed_dim], ) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() image_size = to_2tuple(self.model_tester.image_size) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, image_size) def test_inputs_requiring_padding(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.patch_size = 3 image_size = to_2tuple(self.model_tester.image_size) patch_size = to_2tuple(config.patch_size) padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width)) @slow def test_model_from_pretrained(self): for model_name in TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TFSwinModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_vision @require_tf class TFSwinModelIntegrationTest(unittest.TestCase): @cached_property def default_feature_extractor(self): return ( AutoFeatureExtractor.from_pretrained("microsoft/swin-tiny-patch4-window7-224") if is_vision_available() else None ) @slow def test_inference_image_classification_head(self): model = TFSwinForImageClassification.from_pretrained("microsoft/swin-tiny-patch4-window7-224") feature_extractor = self.default_feature_extractor image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") inputs = feature_extractor(images=image, return_tensors="tf") # forward pass outputs = model(inputs) # verify the logits expected_shape = tf.TensorShape((1, 1000)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = tf.constant([-0.0948, -0.6454, -0.0921]) self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/roberta_prelayernorm/modeling_flax_roberta_prelayernorm.py
# coding=utf-8 # Copyright 2022 The Google Flax Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Flax RoBERTa-PreLayerNorm model.""" from typing import Callable, Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp import numpy as np from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen import partitioning as nn_partitioning from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import ( FlaxBaseModelOutputWithPastAndCrossAttentions, FlaxBaseModelOutputWithPooling, FlaxBaseModelOutputWithPoolingAndCrossAttentions, FlaxCausalLMOutputWithCrossAttentions, FlaxMaskedLMOutput, FlaxMultipleChoiceModelOutput, FlaxQuestionAnsweringModelOutput, FlaxSequenceClassifierOutput, FlaxTokenClassifierOutput, ) from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, overwrite_call_docstring from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_roberta_prelayernorm import RobertaPreLayerNormConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "andreasmadsen/efficient_mlm_m0.40" _CONFIG_FOR_DOC = "RobertaPreLayerNormConfig" remat = nn_partitioning.remat # Copied from transformers.models.roberta.modeling_flax_roberta.create_position_ids_from_input_ids def create_position_ids_from_input_ids(input_ids, padding_idx): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: input_ids: jnp.ndarray padding_idx: int Returns: jnp.ndarray """ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. mask = (input_ids != padding_idx).astype("i4") if mask.ndim > 2: mask = mask.reshape((-1, mask.shape[-1])) incremental_indices = jnp.cumsum(mask, axis=1).astype("i4") * mask incremental_indices = incremental_indices.reshape(input_ids.shape) else: incremental_indices = jnp.cumsum(mask, axis=1).astype("i4") * mask return incremental_indices.astype("i4") + padding_idx ROBERTA_PRELAYERNORM_START_DOCSTRING = r""" This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models) This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior. 
Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`RobertaPreLayerNormConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. """ ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING = r""" Args: input_ids (`numpy.ndarray` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`numpy.ndarray` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`numpy.ndarray` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. head_mask (`numpy.ndarray` of shape `({0})`, `optional): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEmbeddings with Bert->RobertaPreLayerNorm class FlaxRobertaPreLayerNormEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.word_embeddings = nn.Embed( self.config.vocab_size, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype, ) self.position_embeddings = nn.Embed( self.config.max_position_embeddings, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype, ) self.token_type_embeddings = nn.Embed( self.config.type_vocab_size, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype, ) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool = True): # Embed inputs_embeds = self.word_embeddings(input_ids.astype("i4")) position_embeds = self.position_embeddings(position_ids.astype("i4")) token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4")) # Sum all embeddings hidden_states = inputs_embeds + token_type_embeddings + position_embeds # Layer Norm hidden_states = self.LayerNorm(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertSelfAttention with Bert->RobertaPreLayerNorm class FlaxRobertaPreLayerNormSelfAttention(nn.Module): config: RobertaPreLayerNormConfig causal: bool = False dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.head_dim = self.config.hidden_size // self.config.num_attention_heads if self.config.hidden_size % self.config.num_attention_heads != 0: raise ValueError( "`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads` " " : {self.config.num_attention_heads}" ) self.query = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.key = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.value = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) if self.causal: self.causal_mask = make_causal_mask( jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool" ) def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.config.hidden_size,)) @nn.compact # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention._concatenate_to_cache def _concatenate_to_cache(self, key, value, query, attention_mask): """ This function takes projected key, value states from a single input token and concatenates the states to cached states from previous steps. 
This function is slighly adapted from the official Flax repository: https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252 """ # detect if we're initializing by absence of existing cache data. is_initialized = self.has_variable("cache", "cached_key") cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) if is_initialized: *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape # update key, value caches with our new 1d spatial slices cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements. pad_mask = jnp.broadcast_to( jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), ) attention_mask = combine_masks(pad_mask, attention_mask) return key, value, attention_mask def __call__( self, hidden_states, attention_mask, layer_head_mask, key_value_states: Optional[jnp.array] = None, init_cache: bool = False, deterministic=True, output_attentions: bool = False, ): # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None batch_size = hidden_states.shape[0] # get query proj query_states = self.query(hidden_states) # get key, value proj if is_cross_attention: # cross_attentions key_states = self.key(key_value_states) value_states = self.value(key_value_states) else: # self_attention key_states = self.key(hidden_states) value_states = self.value(hidden_states) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) # handle cache prepare causal attention mask if self.causal: query_length, key_length = query_states.shape[1], key_states.shape[1] if self.has_variable("cache", "cached_key"): mask_shift = self.variables["cache"]["cache_index"] max_decoder_length = self.variables["cache"]["cached_key"].shape[1] causal_mask = lax.dynamic_slice( self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length) ) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) # combine masks if needed if attention_mask is not None and self.causal: attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) elif self.causal: attention_mask = causal_mask elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) # During fast autoregressive decoding, we feed one position at a time, # and cache the keys and values step by step. 
if self.causal and (self.has_variable("cache", "cached_key") or init_cache): key_states, value_states, attention_mask = self._concatenate_to_cache( key_states, value_states, query_states, attention_mask ) # Convert the boolean attention mask to an attention bias. if attention_mask is not None: # attention mask in the form of attention bias attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), ) else: attention_bias = None dropout_rng = None if not deterministic and self.config.attention_probs_dropout_prob > 0.0: dropout_rng = self.make_rng("dropout") attn_weights = dot_product_attention_weights( query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.config.attention_probs_dropout_prob, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None, ) # Mask heads if we want to if layer_head_mask is not None: attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,)) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs class FlaxRobertaPreLayerNormSelfOutput(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, hidden_states, input_tensor, deterministic: bool = True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = hidden_states + input_tensor return hidden_states class FlaxRobertaPreLayerNormAttention(nn.Module): config: RobertaPreLayerNormConfig causal: bool = False dtype: jnp.dtype = jnp.float32 def setup(self): self.self = FlaxRobertaPreLayerNormSelfAttention(self.config, causal=self.causal, dtype=self.dtype) self.output = FlaxRobertaPreLayerNormSelfOutput(self.config, dtype=self.dtype) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__( self, hidden_states, attention_mask, layer_head_mask, key_value_states=None, init_cache=False, deterministic=True, output_attentions: bool = False, ): hidden_states_pre_layer_norm = self.LayerNorm(hidden_states) # Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length) # FLAX expects: attention_mask.shape == (*batch_sizes, 1, 1, kv_length) such that it is broadcastable # with attn_weights.shape == (*batch_sizes, num_heads, q_length, kv_length) attn_outputs = self.self( hidden_states_pre_layer_norm, attention_mask, layer_head_mask=layer_head_mask, key_value_states=key_value_states, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, ) attn_output = attn_outputs[0] hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attn_outputs[1],) return outputs class FlaxRobertaPreLayerNormIntermediate(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dense = 
nn.Dense( self.config.intermediate_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.activation = ACT2FN[self.config.hidden_act] def __call__(self, hidden_states): hidden_states = self.LayerNorm(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states class FlaxRobertaPreLayerNormOutput(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, hidden_states, attention_output, deterministic: bool = True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = hidden_states + attention_output return hidden_states # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayer with Bert->RobertaPreLayerNorm class FlaxRobertaPreLayerNormLayer(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.attention = FlaxRobertaPreLayerNormAttention(self.config, causal=self.config.is_decoder, dtype=self.dtype) self.intermediate = FlaxRobertaPreLayerNormIntermediate(self.config, dtype=self.dtype) self.output = FlaxRobertaPreLayerNormOutput(self.config, dtype=self.dtype) if self.config.add_cross_attention: self.crossattention = FlaxRobertaPreLayerNormAttention(self.config, causal=False, dtype=self.dtype) def __call__( self, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, ): # Self Attention attention_outputs = self.attention( hidden_states, attention_mask, layer_head_mask=layer_head_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, ) attention_output = attention_outputs[0] # Cross-Attention Block if encoder_hidden_states is not None: cross_attention_outputs = self.crossattention( attention_output, attention_mask=encoder_attention_mask, layer_head_mask=layer_head_mask, key_value_states=encoder_hidden_states, deterministic=deterministic, output_attentions=output_attentions, ) attention_output = cross_attention_outputs[0] hidden_states = self.intermediate(attention_output) hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attention_outputs[1],) if encoder_hidden_states is not None: outputs += (cross_attention_outputs[1],) return outputs # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayerCollection with Bert->RobertaPreLayerNorm class FlaxRobertaPreLayerNormLayerCollection(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): if self.gradient_checkpointing: FlaxRobertaPreLayerNormCheckpointLayer = remat(FlaxRobertaPreLayerNormLayer, static_argnums=(5, 6, 7)) self.layers = [ FlaxRobertaPreLayerNormCheckpointLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] else: self.layers = [ FlaxRobertaPreLayerNormLayer(self.config, name=str(i), 
dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] def __call__( self, hidden_states, attention_mask, head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None # Check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.shape[0] != (len(self.layers)): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for " f" {head_mask.shape[0]}." ) for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = layer( hidden_states, attention_mask, head_mask[i] if head_mask is not None else None, encoder_hidden_states, encoder_attention_mask, init_cache, deterministic, output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions) if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEncoder with Bert->RobertaPreLayerNorm class FlaxRobertaPreLayerNormEncoder(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): self.layer = FlaxRobertaPreLayerNormLayerCollection( self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) def __call__( self, hidden_states, attention_mask, head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): return self.layer( hidden_states, attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPooler with Bert->RobertaPreLayerNorm class FlaxRobertaPreLayerNormPooler(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) def __call__(self, hidden_states): cls_hidden_state = hidden_states[:, 0] cls_hidden_state = self.dense(cls_hidden_state) return nn.tanh(cls_hidden_state) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaLMHead with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormLMHead(nn.Module): 
config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros def setup(self): self.dense = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.decoder = nn.Dense( self.config.vocab_size, dtype=self.dtype, use_bias=False, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,)) def __call__(self, hidden_states, shared_embedding=None): hidden_states = self.dense(hidden_states) hidden_states = ACT2FN["gelu"](hidden_states) hidden_states = self.layer_norm(hidden_states) if shared_embedding is not None: hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) else: hidden_states = self.decoder(hidden_states) bias = jnp.asarray(self.bias, self.dtype) hidden_states += bias return hidden_states # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaClassificationHead with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormClassificationHead(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) classifier_dropout = ( self.config.classifier_dropout if self.config.classifier_dropout is not None else self.config.hidden_dropout_prob ) self.dropout = nn.Dropout(rate=classifier_dropout) self.out_proj = nn.Dense( self.config.num_labels, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) def __call__(self, hidden_states, deterministic=True): hidden_states = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS]) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.dense(hidden_states) hidden_states = nn.tanh(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.out_proj(hidden_states) return hidden_states # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaPreTrainedModel with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm class FlaxRobertaPreLayerNormPreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = RobertaPreLayerNormConfig base_model_prefix = "roberta_prelayernorm" module_class: nn.Module = None def __init__( self, config: RobertaPreLayerNormConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, gradient_checkpointing: bool = False, **kwargs, ): module = self.module_class(config=config, dtype=dtype, gradient_checkpointing=gradient_checkpointing, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainedModel.enable_gradient_checkpointing def enable_gradient_checkpointing(self): self._module = self.module_class( config=self.config, dtype=self.dtype, gradient_checkpointing=True, ) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") token_type_ids = jnp.ones_like(input_ids) position_ids = create_position_ids_from_input_ids(input_ids, self.config.pad_token_id) attention_mask = jnp.ones_like(input_ids) head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} if self.config.add_cross_attention: encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,)) encoder_attention_mask = attention_mask module_init_outputs = self.module.init( rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, encoder_hidden_states, encoder_attention_mask, return_dict=False, ) else: module_init_outputs = self.module.init( rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, return_dict=False ) random_params = module_init_outputs["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderPreTrainedModel.init_cache def init_cache(self, batch_size, max_length): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. 
""" # init input variables to retrieve cache input_ids = jnp.ones((batch_size, max_length), dtype="i4") attention_mask = jnp.ones_like(input_ids, dtype="i4") position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) init_variables = self.module.init( jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True ) return unfreeze(init_variables["cache"]) @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) def __call__( self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, params: dict = None, dropout_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, past_key_values: dict = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # init input tensors if not passed if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) if position_ids is None: position_ids = create_position_ids_from_input_ids(input_ids, self.config.pad_token_id) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if head_mask is None: head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} if self.config.add_cross_attention: # if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed # down to ensure cache is used. 
It has to be made sure that cache is marked as mutable so that it can be # changed by FlaxRobertaPreLayerNormAttention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False outputs = self.module.apply( inputs, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), token_type_ids=jnp.array(token_type_ids, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), head_mask=jnp.array(head_mask, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, deterministic=not train, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, rngs=rngs, mutable=mutable, ) # add updated cache to model output if past_key_values is not None and return_dict: outputs, past_key_values = outputs outputs["past_key_values"] = unfreeze(past_key_values["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs, past_key_values = outputs outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:] else: outputs = self.module.apply( inputs, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), token_type_ids=jnp.array(token_type_ids, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), head_mask=jnp.array(head_mask, dtype="i4"), deterministic=not train, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, rngs=rngs, ) return outputs class FlaxRobertaPreLayerNormModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation add_pooling_layer: bool = True gradient_checkpointing: bool = False def setup(self): self.embeddings = FlaxRobertaPreLayerNormEmbeddings(self.config, dtype=self.dtype) self.encoder = FlaxRobertaPreLayerNormEncoder( self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.pooler = FlaxRobertaPreLayerNormPooler(self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids: Optional[jnp.ndarray] = None, position_ids: Optional[jnp.ndarray] = None, head_mask: Optional[jnp.ndarray] = None, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # make sure `token_type_ids` is correctly initialized when not passed if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) # make sure `position_ids` is correctly initialized when not passed if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) hidden_states = self.embeddings( input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic ) outputs = self.encoder( hidden_states, attention_mask, head_mask=head_mask, deterministic=deterministic, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.LayerNorm(hidden_states) pooled = self.pooler(hidden_states) if self.add_pooling_layer else None if not return_dict: # if pooled is None, don't return it if pooled is 
None: return (hidden_states,) + outputs[1:] return (hidden_states, pooled) + outputs[1:] return FlaxBaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=hidden_states, pooler_output=pooled, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) @add_start_docstrings( "The bare RoBERTa-PreLayerNorm Model transformer outputting raw hidden-states without any specific head on top.", ROBERTA_PRELAYERNORM_START_DOCSTRING, ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaModel with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormModel(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormModule append_call_sample_docstring( FlaxRobertaPreLayerNormModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutputWithPooling, _CONFIG_FOR_DOC, ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForMaskedLMModule with Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm class FlaxRobertaPreLayerNormForMaskedLMModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule( config=self.config, add_pooling_layer=False, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) self.lm_head = FlaxRobertaPreLayerNormLMHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roberta_prelayernorm( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.roberta_prelayernorm.variables["params"]["embeddings"]["word_embeddings"][ "embedding" ] else: shared_embedding = None # Compute the prediction scores logits = self.lm_head(hidden_states, shared_embedding=shared_embedding) if not return_dict: return (logits,) + outputs[1:] return FlaxMaskedLMOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """RoBERTa-PreLayerNorm Model with a `language modeling` head on top.""", ROBERTA_PRELAYERNORM_START_DOCSTRING ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForMaskedLM with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormForMaskedLM(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormForMaskedLMModule append_call_sample_docstring( FlaxRobertaPreLayerNormForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutputWithPooling, _CONFIG_FOR_DOC, mask="<mask>", ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForSequenceClassificationModule with Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm class FlaxRobertaPreLayerNormForSequenceClassificationModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule( config=self.config, dtype=self.dtype, add_pooling_layer=False, gradient_checkpointing=self.gradient_checkpointing, ) self.classifier = 
FlaxRobertaPreLayerNormClassificationHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roberta_prelayernorm( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output, deterministic=deterministic) if not return_dict: return (logits,) + outputs[1:] return FlaxSequenceClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ RobertaPreLayerNorm Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, ROBERTA_PRELAYERNORM_START_DOCSTRING, ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForSequenceClassification with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormForSequenceClassification(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormForSequenceClassificationModule append_call_sample_docstring( FlaxRobertaPreLayerNormForSequenceClassification, _CHECKPOINT_FOR_DOC, FlaxSequenceClassifierOutput, _CONFIG_FOR_DOC, ) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertForMultipleChoiceModule with Bert->RobertaPreLayerNorm, with self.bert->self.roberta_prelayernorm class FlaxRobertaPreLayerNormForMultipleChoiceModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule( config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.classifier = nn.Dense(1, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): num_choices = input_ids.shape[1] input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None # Model outputs = self.roberta_prelayernorm( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, deterministic=deterministic) logits = self.classifier(pooled_output) reshaped_logits = logits.reshape(-1, num_choices) if not return_dict: return (reshaped_logits,) + outputs[2:] return FlaxMultipleChoiceModelOutput( logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ RobertaPreLayerNorm Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a 
softmax) e.g. for RocStories/SWAG tasks. """, ROBERTA_PRELAYERNORM_START_DOCSTRING, ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForMultipleChoice with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormForMultipleChoice(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormForMultipleChoiceModule overwrite_call_docstring( FlaxRobertaPreLayerNormForMultipleChoice, ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"), ) append_call_sample_docstring( FlaxRobertaPreLayerNormForMultipleChoice, _CHECKPOINT_FOR_DOC, FlaxMultipleChoiceModelOutput, _CONFIG_FOR_DOC, ) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertForTokenClassificationModule with Bert->RobertaPreLayerNorm, with self.bert->self.roberta_prelayernorm class FlaxRobertaPreLayerNormForTokenClassificationModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule( config=self.config, dtype=self.dtype, add_pooling_layer=False, gradient_checkpointing=self.gradient_checkpointing, ) classifier_dropout = ( self.config.classifier_dropout if self.config.classifier_dropout is not None else self.config.hidden_dropout_prob ) self.dropout = nn.Dropout(rate=classifier_dropout) self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roberta_prelayernorm( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, deterministic=deterministic) logits = self.classifier(hidden_states) if not return_dict: return (logits,) + outputs[1:] return FlaxTokenClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ RobertaPreLayerNorm Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", ROBERTA_PRELAYERNORM_START_DOCSTRING, ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForTokenClassification with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormForTokenClassification(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormForTokenClassificationModule append_call_sample_docstring( FlaxRobertaPreLayerNormForTokenClassification, _CHECKPOINT_FOR_DOC, FlaxTokenClassifierOutput, _CONFIG_FOR_DOC, ) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertForQuestionAnsweringModule with Bert->RobertaPreLayerNorm, with self.bert->self.roberta_prelayernorm class FlaxRobertaPreLayerNormForQuestionAnsweringModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule( config=self.config, dtype=self.dtype, add_pooling_layer=False, gradient_checkpointing=self.gradient_checkpointing, ) self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roberta_prelayernorm( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.qa_outputs(hidden_states) start_logits, end_logits = logits.split(self.config.num_labels, axis=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if not return_dict: return (start_logits, end_logits) + outputs[1:] return FlaxQuestionAnsweringModelOutput( start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ RobertaPreLayerNorm Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", ROBERTA_PRELAYERNORM_START_DOCSTRING, ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForQuestionAnswering with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormForQuestionAnswering(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormForQuestionAnsweringModule append_call_sample_docstring( FlaxRobertaPreLayerNormForQuestionAnswering, _CHECKPOINT_FOR_DOC, FlaxQuestionAnsweringModelOutput, _CONFIG_FOR_DOC, ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForCausalLMModule with Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm class FlaxRobertaPreLayerNormForCausalLMModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule( config=self.config, add_pooling_layer=False, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) self.lm_head = FlaxRobertaPreLayerNormLMHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, position_ids, token_type_ids: Optional[jnp.ndarray] = None, head_mask: Optional[jnp.ndarray] = None, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roberta_prelayernorm( input_ids, attention_mask, token_type_ids, position_ids, head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.roberta_prelayernorm.variables["params"]["embeddings"]["word_embeddings"][ "embedding" ] else: shared_embedding = None # Compute the prediction scores logits = self.lm_head(hidden_states, shared_embedding=shared_embedding) if not return_dict: return (logits,) + outputs[1:] return FlaxCausalLMOutputWithCrossAttentions( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) @add_start_docstrings( """ RobertaPreLayerNorm Model with a language modeling head on top (a linear layer on top of the hidden-states output) e.g for autoregressive tasks. """, ROBERTA_PRELAYERNORM_START_DOCSTRING, ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForCausalLM with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormForCausalLM(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormForCausalLMModule def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jnp.DeviceArray] = None): # initializing the cache batch_size, seq_length = input_ids.shape past_key_values = self.init_cache(batch_size, max_length) # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. # But since the decoder uses a causal mask, those positions are masked anyway. 
# Thus, we can create a single static attention_mask here, which is more efficient for compilation extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") if attention_mask is not None: position_ids = attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) return { "past_key_values": past_key_values, "attention_mask": extended_attention_mask, "position_ids": position_ids, } def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1 return model_kwargs append_call_sample_docstring( FlaxRobertaPreLayerNormForCausalLM, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutputWithCrossAttentions, _CONFIG_FOR_DOC, )
# coding=utf-8 # Copyright 2022 The Google Flax Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Flax RoBERTa-PreLayerNorm model.""" from typing import Callable, Optional, Tuple import flax.linen as nn import jax import jax.numpy as jnp import numpy as np from flax.core.frozen_dict import FrozenDict, freeze, unfreeze from flax.linen import combine_masks, make_causal_mask from flax.linen import partitioning as nn_partitioning from flax.linen.attention import dot_product_attention_weights from flax.traverse_util import flatten_dict, unflatten_dict from jax import lax from ...modeling_flax_outputs import ( FlaxBaseModelOutputWithPastAndCrossAttentions, FlaxBaseModelOutputWithPooling, FlaxBaseModelOutputWithPoolingAndCrossAttentions, FlaxCausalLMOutputWithCrossAttentions, FlaxMaskedLMOutput, FlaxMultipleChoiceModelOutput, FlaxQuestionAnsweringModelOutput, FlaxSequenceClassifierOutput, FlaxTokenClassifierOutput, ) from ...modeling_flax_utils import ACT2FN, FlaxPreTrainedModel, append_call_sample_docstring, overwrite_call_docstring from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_roberta_prelayernorm import RobertaPreLayerNormConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "andreasmadsen/efficient_mlm_m0.40" _CONFIG_FOR_DOC = "RobertaPreLayerNormConfig" remat = nn_partitioning.remat # Copied from transformers.models.roberta.modeling_flax_roberta.create_position_ids_from_input_ids def create_position_ids_from_input_ids(input_ids, padding_idx): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: input_ids: jnp.ndarray padding_idx: int Returns: jnp.ndarray """ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. mask = (input_ids != padding_idx).astype("i4") if mask.ndim > 2: mask = mask.reshape((-1, mask.shape[-1])) incremental_indices = jnp.cumsum(mask, axis=1).astype("i4") * mask incremental_indices = incremental_indices.reshape(input_ids.shape) else: incremental_indices = jnp.cumsum(mask, axis=1).astype("i4") * mask return incremental_indices.astype("i4") + padding_idx ROBERTA_PRELAYERNORM_START_DOCSTRING = r""" This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading, saving and converting weights from PyTorch models) This model is also a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module) subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and behavior. 
Finally, this model supports inherent JAX features such as: - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit) - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation) - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap) - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap) Parameters: config ([`RobertaPreLayerNormConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights. """ ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING = r""" Args: input_ids (`numpy.ndarray` of shape `({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) attention_mask (`numpy.ndarray` of shape `({0})`, *optional*): Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) token_type_ids (`numpy.ndarray` of shape `({0})`, *optional*): Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, 1]`: - 0 corresponds to a *sentence A* token, - 1 corresponds to a *sentence B* token. [What are token type IDs?](../glossary#token-type-ids) position_ids (`numpy.ndarray` of shape `({0})`, *optional*): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, config.max_position_embeddings - 1]`. head_mask (`numpy.ndarray` of shape `({0})`, `optional): Mask to nullify selected heads of the attention modules. Mask values selected in `[0, 1]`: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. 
""" # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEmbeddings with Bert->RobertaPreLayerNorm class FlaxRobertaPreLayerNormEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.word_embeddings = nn.Embed( self.config.vocab_size, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype, ) self.position_embeddings = nn.Embed( self.config.max_position_embeddings, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype, ) self.token_type_embeddings = nn.Embed( self.config.type_vocab_size, self.config.hidden_size, embedding_init=jax.nn.initializers.normal(stddev=self.config.initializer_range), dtype=self.dtype, ) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, input_ids, token_type_ids, position_ids, attention_mask, deterministic: bool = True): # Embed inputs_embeds = self.word_embeddings(input_ids.astype("i4")) position_embeds = self.position_embeddings(position_ids.astype("i4")) token_type_embeddings = self.token_type_embeddings(token_type_ids.astype("i4")) # Sum all embeddings hidden_states = inputs_embeds + token_type_embeddings + position_embeds # Layer Norm hidden_states = self.LayerNorm(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) return hidden_states # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertSelfAttention with Bert->RobertaPreLayerNorm class FlaxRobertaPreLayerNormSelfAttention(nn.Module): config: RobertaPreLayerNormConfig causal: bool = False dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.head_dim = self.config.hidden_size // self.config.num_attention_heads if self.config.hidden_size % self.config.num_attention_heads != 0: raise ValueError( "`config.hidden_size`: {self.config.hidden_size} has to be a multiple of `config.num_attention_heads` " " : {self.config.num_attention_heads}" ) self.query = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.key = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.value = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) if self.causal: self.causal_mask = make_causal_mask( jnp.ones((1, self.config.max_position_embeddings), dtype="bool"), dtype="bool" ) def _split_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.config.num_attention_heads, self.head_dim)) def _merge_heads(self, hidden_states): return hidden_states.reshape(hidden_states.shape[:2] + (self.config.hidden_size,)) @nn.compact # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartAttention._concatenate_to_cache def _concatenate_to_cache(self, key, value, query, attention_mask): """ This function takes projected key, value states from a single input token and concatenates the states to cached states from previous steps. 
This function is slighly adapted from the official Flax repository: https://github.com/google/flax/blob/491ce18759622506588784b4fca0e4bf05f8c8cd/flax/linen/attention.py#L252 """ # detect if we're initializing by absence of existing cache data. is_initialized = self.has_variable("cache", "cached_key") cached_key = self.variable("cache", "cached_key", jnp.zeros, key.shape, key.dtype) cached_value = self.variable("cache", "cached_value", jnp.zeros, value.shape, value.dtype) cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32)) if is_initialized: *batch_dims, max_length, num_heads, depth_per_head = cached_key.value.shape # update key, value caches with our new 1d spatial slices cur_index = cache_index.value indices = (0,) * len(batch_dims) + (cur_index, 0, 0) key = lax.dynamic_update_slice(cached_key.value, key, indices) value = lax.dynamic_update_slice(cached_value.value, value, indices) cached_key.value = key cached_value.value = value num_updated_cache_vectors = query.shape[1] cache_index.value = cache_index.value + num_updated_cache_vectors # causal mask for cached decoder self-attention: our single query position should only attend to those key positions that have already been generated and cached, not the remaining zero elements. pad_mask = jnp.broadcast_to( jnp.arange(max_length) < cur_index + num_updated_cache_vectors, tuple(batch_dims) + (1, num_updated_cache_vectors, max_length), ) attention_mask = combine_masks(pad_mask, attention_mask) return key, value, attention_mask def __call__( self, hidden_states, attention_mask, layer_head_mask, key_value_states: Optional[jnp.array] = None, init_cache: bool = False, deterministic=True, output_attentions: bool = False, ): # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None batch_size = hidden_states.shape[0] # get query proj query_states = self.query(hidden_states) # get key, value proj if is_cross_attention: # cross_attentions key_states = self.key(key_value_states) value_states = self.value(key_value_states) else: # self_attention key_states = self.key(hidden_states) value_states = self.value(hidden_states) query_states = self._split_heads(query_states) key_states = self._split_heads(key_states) value_states = self._split_heads(value_states) # handle cache prepare causal attention mask if self.causal: query_length, key_length = query_states.shape[1], key_states.shape[1] if self.has_variable("cache", "cached_key"): mask_shift = self.variables["cache"]["cache_index"] max_decoder_length = self.variables["cache"]["cached_key"].shape[1] causal_mask = lax.dynamic_slice( self.causal_mask, (0, 0, mask_shift, 0), (1, 1, query_length, max_decoder_length) ) else: causal_mask = self.causal_mask[:, :, :query_length, :key_length] causal_mask = jnp.broadcast_to(causal_mask, (batch_size,) + causal_mask.shape[1:]) # combine masks if needed if attention_mask is not None and self.causal: attention_mask = jnp.broadcast_to(jnp.expand_dims(attention_mask, axis=(-3, -2)), causal_mask.shape) attention_mask = combine_masks(attention_mask, causal_mask) elif self.causal: attention_mask = causal_mask elif attention_mask is not None: attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2)) # During fast autoregressive decoding, we feed one position at a time, # and cache the keys and values step by step. 
if self.causal and (self.has_variable("cache", "cached_key") or init_cache): key_states, value_states, attention_mask = self._concatenate_to_cache( key_states, value_states, query_states, attention_mask ) # Convert the boolean attention mask to an attention bias. if attention_mask is not None: # attention mask in the form of attention bias attention_bias = lax.select( attention_mask > 0, jnp.full(attention_mask.shape, 0.0).astype(self.dtype), jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype), ) else: attention_bias = None dropout_rng = None if not deterministic and self.config.attention_probs_dropout_prob > 0.0: dropout_rng = self.make_rng("dropout") attn_weights = dot_product_attention_weights( query_states, key_states, bias=attention_bias, dropout_rng=dropout_rng, dropout_rate=self.config.attention_probs_dropout_prob, broadcast_dropout=True, deterministic=deterministic, dtype=self.dtype, precision=None, ) # Mask heads if we want to if layer_head_mask is not None: attn_weights = jnp.einsum("...hqk,h->...hqk", attn_weights, layer_head_mask) attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value_states) attn_output = attn_output.reshape(attn_output.shape[:2] + (-1,)) outputs = (attn_output, attn_weights) if output_attentions else (attn_output,) return outputs class FlaxRobertaPreLayerNormSelfOutput(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, hidden_states, input_tensor, deterministic: bool = True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = hidden_states + input_tensor return hidden_states class FlaxRobertaPreLayerNormAttention(nn.Module): config: RobertaPreLayerNormConfig causal: bool = False dtype: jnp.dtype = jnp.float32 def setup(self): self.self = FlaxRobertaPreLayerNormSelfAttention(self.config, causal=self.causal, dtype=self.dtype) self.output = FlaxRobertaPreLayerNormSelfOutput(self.config, dtype=self.dtype) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) def __call__( self, hidden_states, attention_mask, layer_head_mask, key_value_states=None, init_cache=False, deterministic=True, output_attentions: bool = False, ): hidden_states_pre_layer_norm = self.LayerNorm(hidden_states) # Attention mask comes in as attention_mask.shape == (*batch_sizes, kv_length) # FLAX expects: attention_mask.shape == (*batch_sizes, 1, 1, kv_length) such that it is broadcastable # with attn_weights.shape == (*batch_sizes, num_heads, q_length, kv_length) attn_outputs = self.self( hidden_states_pre_layer_norm, attention_mask, layer_head_mask=layer_head_mask, key_value_states=key_value_states, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, ) attn_output = attn_outputs[0] hidden_states = self.output(attn_output, hidden_states, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attn_outputs[1],) return outputs class FlaxRobertaPreLayerNormIntermediate(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.dense = 
nn.Dense( self.config.intermediate_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.activation = ACT2FN[self.config.hidden_act] def __call__(self, hidden_states): hidden_states = self.LayerNorm(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states class FlaxRobertaPreLayerNormOutput(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) def __call__(self, hidden_states, attention_output, deterministic: bool = True): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = hidden_states + attention_output return hidden_states # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayer with Bert->RobertaPreLayerNorm class FlaxRobertaPreLayerNormLayer(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.attention = FlaxRobertaPreLayerNormAttention(self.config, causal=self.config.is_decoder, dtype=self.dtype) self.intermediate = FlaxRobertaPreLayerNormIntermediate(self.config, dtype=self.dtype) self.output = FlaxRobertaPreLayerNormOutput(self.config, dtype=self.dtype) if self.config.add_cross_attention: self.crossattention = FlaxRobertaPreLayerNormAttention(self.config, causal=False, dtype=self.dtype) def __call__( self, hidden_states, attention_mask, layer_head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, ): # Self Attention attention_outputs = self.attention( hidden_states, attention_mask, layer_head_mask=layer_head_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, ) attention_output = attention_outputs[0] # Cross-Attention Block if encoder_hidden_states is not None: cross_attention_outputs = self.crossattention( attention_output, attention_mask=encoder_attention_mask, layer_head_mask=layer_head_mask, key_value_states=encoder_hidden_states, deterministic=deterministic, output_attentions=output_attentions, ) attention_output = cross_attention_outputs[0] hidden_states = self.intermediate(attention_output) hidden_states = self.output(hidden_states, attention_output, deterministic=deterministic) outputs = (hidden_states,) if output_attentions: outputs += (attention_outputs[1],) if encoder_hidden_states is not None: outputs += (cross_attention_outputs[1],) return outputs # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertLayerCollection with Bert->RobertaPreLayerNorm class FlaxRobertaPreLayerNormLayerCollection(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): if self.gradient_checkpointing: FlaxRobertaPreLayerNormCheckpointLayer = remat(FlaxRobertaPreLayerNormLayer, static_argnums=(5, 6, 7)) self.layers = [ FlaxRobertaPreLayerNormCheckpointLayer(self.config, name=str(i), dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] else: self.layers = [ FlaxRobertaPreLayerNormLayer(self.config, name=str(i), 
dtype=self.dtype) for i in range(self.config.num_hidden_layers) ] def __call__( self, hidden_states, attention_mask, head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): all_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None # Check if head_mask has a correct number of layers specified if desired if head_mask is not None: if head_mask.shape[0] != (len(self.layers)): raise ValueError( f"The head_mask should be specified for {len(self.layers)} layers, but it is for " f" {head_mask.shape[0]}." ) for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states += (hidden_states,) layer_outputs = layer( hidden_states, attention_mask, head_mask[i] if head_mask is not None else None, encoder_hidden_states, encoder_attention_mask, init_cache, deterministic, output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) if output_hidden_states: all_hidden_states += (hidden_states,) outputs = (hidden_states, all_hidden_states, all_attentions, all_cross_attentions) if not return_dict: return tuple(v for v in outputs if v is not None) return FlaxBaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions, cross_attentions=all_cross_attentions, ) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertEncoder with Bert->RobertaPreLayerNorm class FlaxRobertaPreLayerNormEncoder(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation gradient_checkpointing: bool = False def setup(self): self.layer = FlaxRobertaPreLayerNormLayerCollection( self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) def __call__( self, hidden_states, attention_mask, head_mask, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): return self.layer( hidden_states, attention_mask, head_mask=head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPooler with Bert->RobertaPreLayerNorm class FlaxRobertaPreLayerNormPooler(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation def setup(self): self.dense = nn.Dense( self.config.hidden_size, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), dtype=self.dtype, ) def __call__(self, hidden_states): cls_hidden_state = hidden_states[:, 0] cls_hidden_state = self.dense(cls_hidden_state) return nn.tanh(cls_hidden_state) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaLMHead with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormLMHead(nn.Module): 
config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 bias_init: Callable[..., np.ndarray] = jax.nn.initializers.zeros def setup(self): self.dense = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.decoder = nn.Dense( self.config.vocab_size, dtype=self.dtype, use_bias=False, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) self.bias = self.param("bias", self.bias_init, (self.config.vocab_size,)) def __call__(self, hidden_states, shared_embedding=None): hidden_states = self.dense(hidden_states) hidden_states = ACT2FN["gelu"](hidden_states) hidden_states = self.layer_norm(hidden_states) if shared_embedding is not None: hidden_states = self.decoder.apply({"params": {"kernel": shared_embedding.T}}, hidden_states) else: hidden_states = self.decoder(hidden_states) bias = jnp.asarray(self.bias, self.dtype) hidden_states += bias return hidden_states # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaClassificationHead with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormClassificationHead(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 def setup(self): self.dense = nn.Dense( self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) classifier_dropout = ( self.config.classifier_dropout if self.config.classifier_dropout is not None else self.config.hidden_dropout_prob ) self.dropout = nn.Dropout(rate=classifier_dropout) self.out_proj = nn.Dense( self.config.num_labels, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(self.config.initializer_range), ) def __call__(self, hidden_states, deterministic=True): hidden_states = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS]) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.dense(hidden_states) hidden_states = nn.tanh(hidden_states) hidden_states = self.dropout(hidden_states, deterministic=deterministic) hidden_states = self.out_proj(hidden_states) return hidden_states # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaPreTrainedModel with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm class FlaxRobertaPreLayerNormPreTrainedModel(FlaxPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = RobertaPreLayerNormConfig base_model_prefix = "roberta_prelayernorm" module_class: nn.Module = None def __init__( self, config: RobertaPreLayerNormConfig, input_shape: Tuple = (1, 1), seed: int = 0, dtype: jnp.dtype = jnp.float32, _do_init: bool = True, gradient_checkpointing: bool = False, **kwargs, ): module = self.module_class(config=config, dtype=dtype, gradient_checkpointing=gradient_checkpointing, **kwargs) super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertPreTrainedModel.enable_gradient_checkpointing def enable_gradient_checkpointing(self): self._module = self.module_class( config=self.config, dtype=self.dtype, gradient_checkpointing=True, ) def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict: # init input tensors input_ids = jnp.zeros(input_shape, dtype="i4") token_type_ids = jnp.ones_like(input_ids) position_ids = create_position_ids_from_input_ids(input_ids, self.config.pad_token_id) attention_mask = jnp.ones_like(input_ids) head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} if self.config.add_cross_attention: encoder_hidden_states = jnp.zeros(input_shape + (self.config.hidden_size,)) encoder_attention_mask = attention_mask module_init_outputs = self.module.init( rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, encoder_hidden_states, encoder_attention_mask, return_dict=False, ) else: module_init_outputs = self.module.init( rngs, input_ids, attention_mask, token_type_ids, position_ids, head_mask, return_dict=False ) random_params = module_init_outputs["params"] if params is not None: random_params = flatten_dict(unfreeze(random_params)) params = flatten_dict(unfreeze(params)) for missing_key in self._missing_keys: params[missing_key] = random_params[missing_key] self._missing_keys = set() return freeze(unflatten_dict(params)) else: return random_params # Copied from transformers.models.bart.modeling_flax_bart.FlaxBartDecoderPreTrainedModel.init_cache def init_cache(self, batch_size, max_length): r""" Args: batch_size (`int`): batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache. max_length (`int`): maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized cache. 
""" # init input variables to retrieve cache input_ids = jnp.ones((batch_size, max_length), dtype="i4") attention_mask = jnp.ones_like(input_ids, dtype="i4") position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) init_variables = self.module.init( jax.random.PRNGKey(0), input_ids, attention_mask, position_ids, return_dict=False, init_cache=True ) return unfreeze(init_variables["cache"]) @add_start_docstrings_to_model_forward(ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, sequence_length")) def __call__( self, input_ids, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, params: dict = None, dropout_rng: jax.random.PRNGKey = None, train: bool = False, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, past_key_values: dict = None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.return_dict # init input tensors if not passed if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) if position_ids is None: position_ids = create_position_ids_from_input_ids(input_ids, self.config.pad_token_id) if attention_mask is None: attention_mask = jnp.ones_like(input_ids) if head_mask is None: head_mask = jnp.ones((self.config.num_hidden_layers, self.config.num_attention_heads)) # Handle any PRNG if needed rngs = {} if dropout_rng is not None: rngs["dropout"] = dropout_rng inputs = {"params": params or self.params} if self.config.add_cross_attention: # if past_key_values are passed then cache is already initialized a private flag init_cache has to be passed # down to ensure cache is used. 
It has to be made sure that cache is marked as mutable so that it can be # changed by FlaxRobertaPreLayerNormAttention module if past_key_values: inputs["cache"] = past_key_values mutable = ["cache"] else: mutable = False outputs = self.module.apply( inputs, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), token_type_ids=jnp.array(token_type_ids, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), head_mask=jnp.array(head_mask, dtype="i4"), encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, deterministic=not train, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, rngs=rngs, mutable=mutable, ) # add updated cache to model output if past_key_values is not None and return_dict: outputs, past_key_values = outputs outputs["past_key_values"] = unfreeze(past_key_values["cache"]) return outputs elif past_key_values is not None and not return_dict: outputs, past_key_values = outputs outputs = outputs[:1] + (unfreeze(past_key_values["cache"]),) + outputs[1:] else: outputs = self.module.apply( inputs, jnp.array(input_ids, dtype="i4"), jnp.array(attention_mask, dtype="i4"), token_type_ids=jnp.array(token_type_ids, dtype="i4"), position_ids=jnp.array(position_ids, dtype="i4"), head_mask=jnp.array(head_mask, dtype="i4"), deterministic=not train, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, rngs=rngs, ) return outputs class FlaxRobertaPreLayerNormModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 # the dtype of the computation add_pooling_layer: bool = True gradient_checkpointing: bool = False def setup(self): self.embeddings = FlaxRobertaPreLayerNormEmbeddings(self.config, dtype=self.dtype) self.encoder = FlaxRobertaPreLayerNormEncoder( self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) self.LayerNorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype) self.pooler = FlaxRobertaPreLayerNormPooler(self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids: Optional[jnp.ndarray] = None, position_ids: Optional[jnp.ndarray] = None, head_mask: Optional[jnp.ndarray] = None, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # make sure `token_type_ids` is correctly initialized when not passed if token_type_ids is None: token_type_ids = jnp.zeros_like(input_ids) # make sure `position_ids` is correctly initialized when not passed if position_ids is None: position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape) hidden_states = self.embeddings( input_ids, token_type_ids, position_ids, attention_mask, deterministic=deterministic ) outputs = self.encoder( hidden_states, attention_mask, head_mask=head_mask, deterministic=deterministic, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.LayerNorm(hidden_states) pooled = self.pooler(hidden_states) if self.add_pooling_layer else None if not return_dict: # if pooled is None, don't return it if pooled is 
None: return (hidden_states,) + outputs[1:] return (hidden_states, pooled) + outputs[1:] return FlaxBaseModelOutputWithPoolingAndCrossAttentions( last_hidden_state=hidden_states, pooler_output=pooled, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) @add_start_docstrings( "The bare RoBERTa-PreLayerNorm Model transformer outputting raw hidden-states without any specific head on top.", ROBERTA_PRELAYERNORM_START_DOCSTRING, ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaModel with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormModel(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormModule append_call_sample_docstring( FlaxRobertaPreLayerNormModel, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutputWithPooling, _CONFIG_FOR_DOC, ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForMaskedLMModule with Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm class FlaxRobertaPreLayerNormForMaskedLMModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule( config=self.config, add_pooling_layer=False, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) self.lm_head = FlaxRobertaPreLayerNormLMHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roberta_prelayernorm( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.roberta_prelayernorm.variables["params"]["embeddings"]["word_embeddings"][ "embedding" ] else: shared_embedding = None # Compute the prediction scores logits = self.lm_head(hidden_states, shared_embedding=shared_embedding) if not return_dict: return (logits,) + outputs[1:] return FlaxMaskedLMOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """RoBERTa-PreLayerNorm Model with a `language modeling` head on top.""", ROBERTA_PRELAYERNORM_START_DOCSTRING ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForMaskedLM with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormForMaskedLM(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormForMaskedLMModule append_call_sample_docstring( FlaxRobertaPreLayerNormForMaskedLM, _CHECKPOINT_FOR_DOC, FlaxBaseModelOutputWithPooling, _CONFIG_FOR_DOC, mask="<mask>", ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForSequenceClassificationModule with Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm class FlaxRobertaPreLayerNormForSequenceClassificationModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule( config=self.config, dtype=self.dtype, add_pooling_layer=False, gradient_checkpointing=self.gradient_checkpointing, ) self.classifier = 
FlaxRobertaPreLayerNormClassificationHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roberta_prelayernorm( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output, deterministic=deterministic) if not return_dict: return (logits,) + outputs[1:] return FlaxSequenceClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ RobertaPreLayerNorm Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, ROBERTA_PRELAYERNORM_START_DOCSTRING, ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForSequenceClassification with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormForSequenceClassification(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormForSequenceClassificationModule append_call_sample_docstring( FlaxRobertaPreLayerNormForSequenceClassification, _CHECKPOINT_FOR_DOC, FlaxSequenceClassifierOutput, _CONFIG_FOR_DOC, ) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertForMultipleChoiceModule with Bert->RobertaPreLayerNorm, with self.bert->self.roberta_prelayernorm class FlaxRobertaPreLayerNormForMultipleChoiceModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule( config=self.config, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) self.dropout = nn.Dropout(rate=self.config.hidden_dropout_prob) self.classifier = nn.Dense(1, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): num_choices = input_ids.shape[1] input_ids = input_ids.reshape(-1, input_ids.shape[-1]) if input_ids is not None else None attention_mask = attention_mask.reshape(-1, attention_mask.shape[-1]) if attention_mask is not None else None token_type_ids = token_type_ids.reshape(-1, token_type_ids.shape[-1]) if token_type_ids is not None else None position_ids = position_ids.reshape(-1, position_ids.shape[-1]) if position_ids is not None else None # Model outputs = self.roberta_prelayernorm( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output, deterministic=deterministic) logits = self.classifier(pooled_output) reshaped_logits = logits.reshape(-1, num_choices) if not return_dict: return (reshaped_logits,) + outputs[2:] return FlaxMultipleChoiceModelOutput( logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ RobertaPreLayerNorm Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a 
softmax) e.g. for RocStories/SWAG tasks. """, ROBERTA_PRELAYERNORM_START_DOCSTRING, ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForMultipleChoice with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormForMultipleChoice(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormForMultipleChoiceModule overwrite_call_docstring( FlaxRobertaPreLayerNormForMultipleChoice, ROBERTA_PRELAYERNORM_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"), ) append_call_sample_docstring( FlaxRobertaPreLayerNormForMultipleChoice, _CHECKPOINT_FOR_DOC, FlaxMultipleChoiceModelOutput, _CONFIG_FOR_DOC, ) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertForTokenClassificationModule with Bert->RobertaPreLayerNorm, with self.bert->self.roberta_prelayernorm class FlaxRobertaPreLayerNormForTokenClassificationModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule( config=self.config, dtype=self.dtype, add_pooling_layer=False, gradient_checkpointing=self.gradient_checkpointing, ) classifier_dropout = ( self.config.classifier_dropout if self.config.classifier_dropout is not None else self.config.hidden_dropout_prob ) self.dropout = nn.Dropout(rate=classifier_dropout) self.classifier = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roberta_prelayernorm( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states, deterministic=deterministic) logits = self.classifier(hidden_states) if not return_dict: return (logits,) + outputs[1:] return FlaxTokenClassifierOutput( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ RobertaPreLayerNorm Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", ROBERTA_PRELAYERNORM_START_DOCSTRING, ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForTokenClassification with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormForTokenClassification(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormForTokenClassificationModule append_call_sample_docstring( FlaxRobertaPreLayerNormForTokenClassification, _CHECKPOINT_FOR_DOC, FlaxTokenClassifierOutput, _CONFIG_FOR_DOC, ) # Copied from transformers.models.bert.modeling_flax_bert.FlaxBertForQuestionAnsweringModule with Bert->RobertaPreLayerNorm, with self.bert->self.roberta_prelayernorm class FlaxRobertaPreLayerNormForQuestionAnsweringModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule( config=self.config, dtype=self.dtype, add_pooling_layer=False, gradient_checkpointing=self.gradient_checkpointing, ) self.qa_outputs = nn.Dense(self.config.num_labels, dtype=self.dtype) def __call__( self, input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roberta_prelayernorm( input_ids, attention_mask, token_type_ids, position_ids, head_mask, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] logits = self.qa_outputs(hidden_states) start_logits, end_logits = logits.split(self.config.num_labels, axis=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if not return_dict: return (start_logits, end_logits) + outputs[1:] return FlaxQuestionAnsweringModelOutput( start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ RobertaPreLayerNorm Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", ROBERTA_PRELAYERNORM_START_DOCSTRING, ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForQuestionAnswering with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormForQuestionAnswering(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormForQuestionAnsweringModule append_call_sample_docstring( FlaxRobertaPreLayerNormForQuestionAnswering, _CHECKPOINT_FOR_DOC, FlaxQuestionAnsweringModelOutput, _CONFIG_FOR_DOC, ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForCausalLMModule with Roberta->RobertaPreLayerNorm,roberta->roberta_prelayernorm class FlaxRobertaPreLayerNormForCausalLMModule(nn.Module): config: RobertaPreLayerNormConfig dtype: jnp.dtype = jnp.float32 gradient_checkpointing: bool = False def setup(self): self.roberta_prelayernorm = FlaxRobertaPreLayerNormModule( config=self.config, add_pooling_layer=False, dtype=self.dtype, gradient_checkpointing=self.gradient_checkpointing, ) self.lm_head = FlaxRobertaPreLayerNormLMHead(config=self.config, dtype=self.dtype) def __call__( self, input_ids, attention_mask, position_ids, token_type_ids: Optional[jnp.ndarray] = None, head_mask: Optional[jnp.ndarray] = None, encoder_hidden_states: Optional[jnp.ndarray] = None, encoder_attention_mask: Optional[jnp.ndarray] = None, init_cache: bool = False, deterministic: bool = True, output_attentions: bool = False, output_hidden_states: bool = False, return_dict: bool = True, ): # Model outputs = self.roberta_prelayernorm( input_ids, attention_mask, token_type_ids, position_ids, head_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, init_cache=init_cache, deterministic=deterministic, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] if self.config.tie_word_embeddings: shared_embedding = self.roberta_prelayernorm.variables["params"]["embeddings"]["word_embeddings"][ "embedding" ] else: shared_embedding = None # Compute the prediction scores logits = self.lm_head(hidden_states, shared_embedding=shared_embedding) if not return_dict: return (logits,) + outputs[1:] return FlaxCausalLMOutputWithCrossAttentions( logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) @add_start_docstrings( """ RobertaPreLayerNorm Model with a language modeling head on top (a linear layer on top of the hidden-states output) e.g for autoregressive tasks. """, ROBERTA_PRELAYERNORM_START_DOCSTRING, ) # Copied from transformers.models.roberta.modeling_flax_roberta.FlaxRobertaForCausalLM with Roberta->RobertaPreLayerNorm class FlaxRobertaPreLayerNormForCausalLM(FlaxRobertaPreLayerNormPreTrainedModel): module_class = FlaxRobertaPreLayerNormForCausalLMModule def prepare_inputs_for_generation(self, input_ids, max_length, attention_mask: Optional[jnp.DeviceArray] = None): # initializing the cache batch_size, seq_length = input_ids.shape past_key_values = self.init_cache(batch_size, max_length) # Note that usually one would have to put 0's in the attention_mask for x > input_ids.shape[-1] and x < cache_length. # But since the decoder uses a causal mask, those positions are masked anyway. 
# Thus, we can create a single static attention_mask here, which is more efficient for compilation extended_attention_mask = jnp.ones((batch_size, max_length), dtype="i4") if attention_mask is not None: position_ids = attention_mask.cumsum(axis=-1) - 1 extended_attention_mask = lax.dynamic_update_slice(extended_attention_mask, attention_mask, (0, 0)) else: position_ids = jnp.broadcast_to(jnp.arange(seq_length, dtype="i4")[None, :], (batch_size, seq_length)) return { "past_key_values": past_key_values, "attention_mask": extended_attention_mask, "position_ids": position_ids, } def update_inputs_for_generation(self, model_outputs, model_kwargs): model_kwargs["past_key_values"] = model_outputs.past_key_values model_kwargs["position_ids"] = model_kwargs["position_ids"][:, -1:] + 1 return model_kwargs append_call_sample_docstring( FlaxRobertaPreLayerNormForCausalLM, _CHECKPOINT_FOR_DOC, FlaxCausalLMOutputWithCrossAttentions, _CONFIG_FOR_DOC, )
-1
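The `prepare_inputs_for_generation` helper in the record above derives position ids from a cumulative sum of the attention mask. A minimal sketch of that computation follows; the mask values are made-up and only `jax.numpy` is assumed:

```python
import jax.numpy as jnp

# A left-padded batch: 0 = padding, 1 = real token (made-up example values)
attention_mask = jnp.array([[0, 0, 1, 1, 1]], dtype="i4")

# Same expression as in prepare_inputs_for_generation above:
# every real token gets its 0-based position, padding positions end up at -1
position_ids = attention_mask.cumsum(axis=-1) - 1

print(position_ids)  # [[-1 -1  0  1  2]]
```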
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing a torch tensor with `//` is deprecated and has to be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/camembert/test_modeling_tf_camembert.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class TFCamembertModelIntegrationTest(unittest.TestCase): @slow def test_output_embeds_base_model(self): model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base") input_ids = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]], dtype=tf.int32, ) # J'aime le camembert !" output = model(input_ids)["last_hidden_state"] expected_shape = tf.TensorShape((1, 10, 768)) self.assertEqual(output.shape, expected_shape) # compare the actual values for a slice. expected_slice = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]], dtype=tf.float32, ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import numpy as np import tensorflow as tf from transformers import TFCamembertModel @require_tf @require_sentencepiece @require_tokenizers class TFCamembertModelIntegrationTest(unittest.TestCase): @slow def test_output_embeds_base_model(self): model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base") input_ids = tf.convert_to_tensor( [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]], dtype=tf.int32, ) # J'aime le camembert !" output = model(input_ids)["last_hidden_state"] expected_shape = tf.TensorShape((1, 10, 768)) self.assertEqual(output.shape, expected_shape) # compare the actual values for a slice. expected_slice = tf.convert_to_tensor( [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]], dtype=tf.float32, ) # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0') # camembert.eval() # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach() self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
-1
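The `//` → `torch.div` replacement described in the PR metadata above can be illustrated with a short, self-contained sketch; the tensor and divisor below are made-up values, not taken from the PR diff:

```python
import torch

positions = torch.arange(12)
block_size = 4

# Deprecated pattern that triggers the "__floordiv__ is deprecated" warning:
# block_ids = positions // block_size

# Replacement: explicit floor division via torch.div
# (identical results for non-negative integer inputs)
block_ids = torch.div(positions, block_size, rounding_mode="floor")

print(block_ids)  # tensor([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
```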
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing a torch tensor with `//` is deprecated and has to be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/utils/doc.py
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Doc utilities: Utilities related to documentation """ import functools import re import types def add_start_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "") return fn return docstring_decorator def add_start_docstrings_to_model_forward(*docstr): def docstring_decorator(fn): docstring = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "") class_name = f"[`{fn.__qualname__.split('.')[0]}`]" intro = f" The {class_name} forward method, overrides the `__call__` special method." note = r""" <Tip> Although the recipe for forward pass needs to be defined within this function, one should call the [`Module`] instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them. </Tip> """ fn.__doc__ = intro + note + docstring return fn return docstring_decorator def add_end_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "".join(docstr) return fn return docstring_decorator PT_RETURN_INTRODUCTION = r""" Returns: [`{full_output_type}`] or `tuple(torch.FloatTensor)`: A [`{full_output_type}`] or a tuple of `torch.FloatTensor` (if `return_dict=False` is passed or when `config.return_dict=False`) comprising various elements depending on the configuration ([`{config_class}`]) and inputs. """ TF_RETURN_INTRODUCTION = r""" Returns: [`{full_output_type}`] or `tuple(tf.Tensor)`: A [`{full_output_type}`] or a tuple of `tf.Tensor` (if `return_dict=False` is passed or when `config.return_dict=False`) comprising various elements depending on the configuration ([`{config_class}`]) and inputs. """ def _get_indent(t): """Returns the indentation in the first line of t""" search = re.search(r"^(\s*)\S", t) return "" if search is None else search.groups()[0] def _convert_output_args_doc(output_args_doc): """Convert output_args_doc to display properly.""" # Split output_arg_doc in blocks argument/description indent = _get_indent(output_args_doc) blocks = [] current_block = "" for line in output_args_doc.split("\n"): # If the indent is the same as the beginning, the line is the name of new arg. if _get_indent(line) == indent: if len(current_block) > 0: blocks.append(current_block[:-1]) current_block = f"{line}\n" else: # Otherwise it's part of the description of the current arg. # We need to remove 2 spaces to the indentation. current_block += f"{line[2:]}\n" blocks.append(current_block[:-1]) # Format each block for proper rendering for i in range(len(blocks)): blocks[i] = re.sub(r"^(\s+)(\S+)(\s+)", r"\1- **\2**\3", blocks[i]) blocks[i] = re.sub(r":\s*\n\s*(\S)", r" -- \1", blocks[i]) return "\n".join(blocks) def _prepare_output_docstrings(output_type, config_class, min_indent=None): """ Prepares the return part of the docstring using `output_type`. 
""" output_docstring = output_type.__doc__ # Remove the head of the docstring to keep the list of args only lines = output_docstring.split("\n") i = 0 while i < len(lines) and re.search(r"^\s*(Args|Parameters):\s*$", lines[i]) is None: i += 1 if i < len(lines): params_docstring = "\n".join(lines[(i + 1) :]) params_docstring = _convert_output_args_doc(params_docstring) # Add the return introduction full_output_type = f"{output_type.__module__}.{output_type.__name__}" intro = TF_RETURN_INTRODUCTION if output_type.__name__.startswith("TF") else PT_RETURN_INTRODUCTION intro = intro.format(full_output_type=full_output_type, config_class=config_class) result = intro + params_docstring # Apply minimum indent if necessary if min_indent is not None: lines = result.split("\n") # Find the indent of the first nonempty line i = 0 while len(lines[i]) == 0: i += 1 indent = len(_get_indent(lines[i])) # If too small, add indentation to all nonempty lines if indent < min_indent: to_add = " " * (min_indent - indent) lines = [(f"{to_add}{line}" if len(line) > 0 else line) for line in lines] result = "\n".join(lines) return result FAKE_MODEL_DISCLAIMER = """ <Tip warning={true}> This example uses a random model as the real ones are all very big. To get proper results, you should use {real_checkpoint} instead of {fake_checkpoint}. If you get out-of-memory when loading that checkpoint, you can try adding `device_map="auto"` in the `from_pretrained` call. </Tip> """ PT_TOKEN_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer( ... "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="pt" ... ) >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_token_class_ids = logits.argmax(-1) >>> # Note that tokens are classified rather then input words which means that >>> # there might be more predicted token classes than words. >>> # Multiple token classes might account for the same word >>> predicted_tokens_classes = [model.config.id2label[t.item()] for t in predicted_token_class_ids[0]] >>> predicted_tokens_classes {expected_output} >>> labels = predicted_token_class_ids >>> loss = model(**inputs, labels=labels).loss >>> round(loss.item(), 2) {expected_loss} ``` """ PT_QUESTION_ANSWERING_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> inputs = tokenizer(question, text, return_tensors="pt") >>> with torch.no_grad(): ... 
outputs = model(**inputs) >>> answer_start_index = outputs.start_logits.argmax() >>> answer_end_index = outputs.end_logits.argmax() >>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1] >>> tokenizer.decode(predict_answer_tokens, skip_special_tokens=True) {expected_output} >>> # target is "nice puppet" >>> target_start_index = torch.tensor([{qa_target_start_index}]) >>> target_end_index = torch.tensor([{qa_target_end_index}]) >>> outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index) >>> loss = outputs.loss >>> round(loss.item(), 2) {expected_loss} ``` """ PT_SEQUENCE_CLASSIFICATION_SAMPLE = r""" Example of single-label classification: ```python >>> import torch >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_class_id = logits.argmax().item() >>> model.config.id2label[predicted_class_id] {expected_output} >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)` >>> num_labels = len(model.config.id2label) >>> model = {model_class}.from_pretrained("{checkpoint}", num_labels=num_labels) >>> labels = torch.tensor([1]) >>> loss = model(**inputs, labels=labels).loss >>> round(loss.item(), 2) {expected_loss} ``` Example of multi-label classification: ```python >>> import torch >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}", problem_type="multi_label_classification") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_class_ids = torch.arange(0, logits.shape[-1])[torch.sigmoid(logits).squeeze(dim=0) > 0.5] >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)` >>> num_labels = len(model.config.id2label) >>> model = {model_class}.from_pretrained( ... "{checkpoint}", num_labels=num_labels, problem_type="multi_label_classification" ... ) >>> labels = torch.sum( ... torch.nn.functional.one_hot(predicted_class_ids[None, :].clone(), num_classes=num_labels), dim=1 ... ).to(torch.float) >>> loss = model(**inputs, labels=labels).loss ``` """ PT_MASKED_LM_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="pt") >>> with torch.no_grad(): ... 
logits = model(**inputs).logits >>> # retrieve index of {mask} >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0] >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1) >>> tokenizer.decode(predicted_token_id) {expected_output} >>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] >>> # mask labels of non-{mask} tokens >>> labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100) >>> outputs = model(**inputs, labels=labels) >>> round(outputs.loss.item(), 2) {expected_loss} ``` """ PT_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ``` """ PT_MULTIPLE_CHOICE_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> choice0 = "It is eaten with a fork and a knife." >>> choice1 = "It is eaten while held in the hand." >>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="pt", padding=True) >>> outputs = model(**{{k: v.unsqueeze(0) for k, v in encoding.items()}}, labels=labels) # batch size is 1 >>> # the linear classifier still needs to be trained >>> loss = outputs.loss >>> logits = outputs.logits ``` """ PT_CAUSAL_LM_SAMPLE = r""" Example: ```python >>> import torch >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs, labels=inputs["input_ids"]) >>> loss = outputs.loss >>> logits = outputs.logits ``` """ PT_SPEECH_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoProcessor, {model_class} >>> import torch >>> from datasets import load_dataset >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> processor = AutoProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") >>> with torch.no_grad(): ... 
outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) {expected_output} ``` """ PT_SPEECH_CTC_SAMPLE = r""" Example: ```python >>> from transformers import AutoProcessor, {model_class} >>> from datasets import load_dataset >>> import torch >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> processor = AutoProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_ids = torch.argmax(logits, dim=-1) >>> # transcribe speech >>> transcription = processor.batch_decode(predicted_ids) >>> transcription[0] {expected_output} >>> inputs["labels"] = processor(text=dataset[0]["text"], return_tensors="pt").input_ids >>> # compute loss >>> loss = model(**inputs).loss >>> round(loss.item(), 2) {expected_loss} ``` """ PT_SPEECH_SEQ_CLASS_SAMPLE = r""" Example: ```python >>> from transformers import AutoFeatureExtractor, {model_class} >>> from datasets import load_dataset >>> import torch >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = feature_extractor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_class_ids = torch.argmax(logits, dim=-1).item() >>> predicted_label = model.config.id2label[predicted_class_ids] >>> predicted_label {expected_output} >>> # compute loss - target_label is e.g. "down" >>> target_label = model.config.id2label[0] >>> inputs["labels"] = torch.tensor([model.config.label2id[target_label]]) >>> loss = model(**inputs).loss >>> round(loss.item(), 2) {expected_loss} ``` """ PT_SPEECH_FRAME_CLASS_SAMPLE = r""" Example: ```python >>> from transformers import AutoFeatureExtractor, {model_class} >>> from datasets import load_dataset >>> import torch >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt", sampling_rate=sampling_rate) >>> with torch.no_grad(): ... 
logits = model(**inputs).logits >>> probabilities = torch.sigmoid(logits[0]) >>> # labels is a one-hot array of shape (num_frames, num_speakers) >>> labels = (probabilities > 0.5).long() >>> labels[0].tolist() {expected_output} ``` """ PT_SPEECH_XVECTOR_SAMPLE = r""" Example: ```python >>> from transformers import AutoFeatureExtractor, {model_class} >>> from datasets import load_dataset >>> import torch >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = feature_extractor( ... [d["array"] for d in dataset[:2]["audio"]], sampling_rate=sampling_rate, return_tensors="pt", padding=True ... ) >>> with torch.no_grad(): ... embeddings = model(**inputs).embeddings >>> embeddings = torch.nn.functional.normalize(embeddings, dim=-1).cpu() >>> # the resulting embeddings can be used for cosine similarity-based retrieval >>> cosine_sim = torch.nn.CosineSimilarity(dim=-1) >>> similarity = cosine_sim(embeddings[0], embeddings[1]) >>> threshold = 0.7 # the optimal threshold is dataset-dependent >>> if similarity < threshold: ... print("Speakers are not the same!") >>> round(similarity.item(), 2) {expected_output} ``` """ PT_VISION_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoImageProcessor, {model_class} >>> import torch >>> from datasets import load_dataset >>> dataset = load_dataset("huggingface/cats-image") >>> image = dataset["test"]["image"][0] >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = image_processor(image, return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) {expected_output} ``` """ PT_VISION_SEQ_CLASS_SAMPLE = r""" Example: ```python >>> from transformers import AutoImageProcessor, {model_class} >>> import torch >>> from datasets import load_dataset >>> dataset = load_dataset("huggingface/cats-image") >>> image = dataset["test"]["image"][0] >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = image_processor(image, return_tensors="pt") >>> with torch.no_grad(): ... 
logits = model(**inputs).logits >>> # model predicts one of the 1000 ImageNet classes >>> predicted_label = logits.argmax(-1).item() >>> print(model.config.id2label[predicted_label]) {expected_output} ``` """ PT_SAMPLE_DOCSTRINGS = { "SequenceClassification": PT_SEQUENCE_CLASSIFICATION_SAMPLE, "QuestionAnswering": PT_QUESTION_ANSWERING_SAMPLE, "TokenClassification": PT_TOKEN_CLASSIFICATION_SAMPLE, "MultipleChoice": PT_MULTIPLE_CHOICE_SAMPLE, "MaskedLM": PT_MASKED_LM_SAMPLE, "LMHead": PT_CAUSAL_LM_SAMPLE, "BaseModel": PT_BASE_MODEL_SAMPLE, "SpeechBaseModel": PT_SPEECH_BASE_MODEL_SAMPLE, "CTC": PT_SPEECH_CTC_SAMPLE, "AudioClassification": PT_SPEECH_SEQ_CLASS_SAMPLE, "AudioFrameClassification": PT_SPEECH_FRAME_CLASS_SAMPLE, "AudioXVector": PT_SPEECH_XVECTOR_SAMPLE, "VisionBaseModel": PT_VISION_BASE_MODEL_SAMPLE, "ImageClassification": PT_VISION_SEQ_CLASS_SAMPLE, } TF_TOKEN_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer( ... "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="tf" ... ) >>> logits = model(**inputs).logits >>> predicted_token_class_ids = tf.math.argmax(logits, axis=-1) >>> # Note that tokens are classified rather then input words which means that >>> # there might be more predicted token classes than words. >>> # Multiple token classes might account for the same word >>> predicted_tokens_classes = [model.config.id2label[t] for t in predicted_token_class_ids[0].numpy().tolist()] >>> predicted_tokens_classes {expected_output} ``` ```python >>> labels = predicted_token_class_ids >>> loss = tf.math.reduce_mean(model(**inputs, labels=labels).loss) >>> round(float(loss), 2) {expected_loss} ``` """ TF_QUESTION_ANSWERING_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> inputs = tokenizer(question, text, return_tensors="tf") >>> outputs = model(**inputs) >>> answer_start_index = int(tf.math.argmax(outputs.start_logits, axis=-1)[0]) >>> answer_end_index = int(tf.math.argmax(outputs.end_logits, axis=-1)[0]) >>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1] >>> tokenizer.decode(predict_answer_tokens) {expected_output} ``` ```python >>> # target is "nice puppet" >>> target_start_index = tf.constant([{qa_target_start_index}]) >>> target_end_index = tf.constant([{qa_target_end_index}]) >>> outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index) >>> loss = tf.math.reduce_mean(outputs.loss) >>> round(float(loss), 2) {expected_loss} ``` """ TF_SEQUENCE_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") >>> logits = model(**inputs).logits >>> predicted_class_id = int(tf.math.argmax(logits, axis=-1)[0]) >>> model.config.id2label[predicted_class_id] {expected_output} ``` ```python >>> # To train a model on `num_labels` 
classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)` >>> num_labels = len(model.config.id2label) >>> model = {model_class}.from_pretrained("{checkpoint}", num_labels=num_labels) >>> labels = tf.constant(1) >>> loss = model(**inputs, labels=labels).loss >>> round(float(loss), 2) {expected_loss} ``` """ TF_MASKED_LM_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="tf") >>> logits = model(**inputs).logits >>> # retrieve index of {mask} >>> mask_token_index = tf.where((inputs.input_ids == tokenizer.mask_token_id)[0]) >>> selected_logits = tf.gather_nd(logits[0], indices=mask_token_index) >>> predicted_token_id = tf.math.argmax(selected_logits, axis=-1) >>> tokenizer.decode(predicted_token_id) {expected_output} ``` ```python >>> labels = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"] >>> # mask labels of non-{mask} tokens >>> labels = tf.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100) >>> outputs = model(**inputs, labels=labels) >>> round(float(outputs.loss), 2) {expected_loss} ``` """ TF_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") >>> outputs = model(inputs) >>> last_hidden_states = outputs.last_hidden_state ``` """ TF_MULTIPLE_CHOICE_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> choice0 = "It is eaten with a fork and a knife." >>> choice1 = "It is eaten while held in the hand." 
>>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="tf", padding=True) >>> inputs = {{k: tf.expand_dims(v, 0) for k, v in encoding.items()}} >>> outputs = model(inputs) # batch size is 1 >>> # the linear classifier still needs to be trained >>> logits = outputs.logits ``` """ TF_CAUSAL_LM_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") >>> outputs = model(inputs) >>> logits = outputs.logits ``` """ TF_SPEECH_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoProcessor, {model_class} >>> from datasets import load_dataset >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> processor = AutoProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="tf") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) {expected_output} ``` """ TF_SPEECH_CTC_SAMPLE = r""" Example: ```python >>> from transformers import AutoProcessor, {model_class} >>> from datasets import load_dataset >>> import tensorflow as tf >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> processor = AutoProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="tf") >>> logits = model(**inputs).logits >>> predicted_ids = tf.math.argmax(logits, axis=-1) >>> # transcribe speech >>> transcription = processor.batch_decode(predicted_ids) >>> transcription[0] {expected_output} ``` ```python >>> inputs["labels"] = processor(text=dataset[0]["text"], return_tensors="tf").input_ids >>> # compute loss >>> loss = model(**inputs).loss >>> round(float(loss), 2) {expected_loss} ``` """ TF_VISION_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoImageProcessor, {model_class} >>> from datasets import load_dataset >>> dataset = load_dataset("huggingface/cats-image") >>> image = dataset["test"]["image"][0] >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = image_processor(image, return_tensors="tf") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) {expected_output} ``` """ TF_VISION_SEQ_CLASS_SAMPLE = r""" Example: ```python >>> from transformers import AutoImageProcessor, {model_class} >>> import tensorflow as tf >>> from datasets import load_dataset >>> dataset = load_dataset("huggingface/cats-image") >>> image = dataset["test"]["image"][0] >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = image_processor(image, return_tensors="tf") >>> logits = model(**inputs).logits 
>>> # model predicts one of the 1000 ImageNet classes >>> predicted_label = int(tf.math.argmax(logits, axis=-1)) >>> print(model.config.id2label[predicted_label]) {expected_output} ``` """ TF_SAMPLE_DOCSTRINGS = { "SequenceClassification": TF_SEQUENCE_CLASSIFICATION_SAMPLE, "QuestionAnswering": TF_QUESTION_ANSWERING_SAMPLE, "TokenClassification": TF_TOKEN_CLASSIFICATION_SAMPLE, "MultipleChoice": TF_MULTIPLE_CHOICE_SAMPLE, "MaskedLM": TF_MASKED_LM_SAMPLE, "LMHead": TF_CAUSAL_LM_SAMPLE, "BaseModel": TF_BASE_MODEL_SAMPLE, "SpeechBaseModel": TF_SPEECH_BASE_MODEL_SAMPLE, "CTC": TF_SPEECH_CTC_SAMPLE, "VisionBaseModel": TF_VISION_BASE_MODEL_SAMPLE, "ImageClassification": TF_VISION_SEQ_CLASS_SAMPLE, } FLAX_TOKEN_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") >>> outputs = model(**inputs) >>> logits = outputs.logits ``` """ FLAX_QUESTION_ANSWERING_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> inputs = tokenizer(question, text, return_tensors="jax") >>> outputs = model(**inputs) >>> start_scores = outputs.start_logits >>> end_scores = outputs.end_logits ``` """ FLAX_SEQUENCE_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") >>> outputs = model(**inputs) >>> logits = outputs.logits ``` """ FLAX_MASKED_LM_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="jax") >>> outputs = model(**inputs) >>> logits = outputs.logits ``` """ FLAX_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ``` """ FLAX_MULTIPLE_CHOICE_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> choice0 = "It is eaten with a fork and a knife." >>> choice1 = "It is eaten while held in the hand." 
>>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="jax", padding=True) >>> outputs = model(**{{k: v[None, :] for k, v in encoding.items()}}) >>> logits = outputs.logits ``` """ FLAX_CAUSAL_LM_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np") >>> outputs = model(**inputs) >>> # retrieve logts for next token >>> next_token_logits = outputs.logits[:, -1] ``` """ FLAX_SAMPLE_DOCSTRINGS = { "SequenceClassification": FLAX_SEQUENCE_CLASSIFICATION_SAMPLE, "QuestionAnswering": FLAX_QUESTION_ANSWERING_SAMPLE, "TokenClassification": FLAX_TOKEN_CLASSIFICATION_SAMPLE, "MultipleChoice": FLAX_MULTIPLE_CHOICE_SAMPLE, "MaskedLM": FLAX_MASKED_LM_SAMPLE, "BaseModel": FLAX_BASE_MODEL_SAMPLE, "LMHead": FLAX_CAUSAL_LM_SAMPLE, } def filter_outputs_from_example(docstring, **kwargs): """ Removes the lines testing an output with the doctest syntax in a code sample when it's set to `None`. """ for key, value in kwargs.items(): if value is not None: continue doc_key = "{" + key + "}" docstring = re.sub(rf"\n([^\n]+)\n\s+{doc_key}\n", "\n", docstring) return docstring def add_code_sample_docstrings( *docstr, processor_class=None, checkpoint=None, output_type=None, config_class=None, mask="[MASK]", qa_target_start_index=14, qa_target_end_index=15, model_cls=None, modality=None, expected_output=None, expected_loss=None, real_checkpoint=None, ): def docstring_decorator(fn): # model_class defaults to function's class if not specified otherwise model_class = fn.__qualname__.split(".")[0] if model_cls is None else model_cls if model_class[:2] == "TF": sample_docstrings = TF_SAMPLE_DOCSTRINGS elif model_class[:4] == "Flax": sample_docstrings = FLAX_SAMPLE_DOCSTRINGS else: sample_docstrings = PT_SAMPLE_DOCSTRINGS # putting all kwargs for docstrings in a dict to be used # with the `.format(**doc_kwargs)`. Note that string might # be formatted with non-existing keys, which is fine. doc_kwargs = { "model_class": model_class, "processor_class": processor_class, "checkpoint": checkpoint, "mask": mask, "qa_target_start_index": qa_target_start_index, "qa_target_end_index": qa_target_end_index, "expected_output": expected_output, "expected_loss": expected_loss, "real_checkpoint": real_checkpoint, "fake_checkpoint": checkpoint, "true": "{true}", # For <Tip warning={true}> syntax that conflicts with formatting. 
} if ("SequenceClassification" in model_class or "AudioClassification" in model_class) and modality == "audio": code_sample = sample_docstrings["AudioClassification"] elif "SequenceClassification" in model_class: code_sample = sample_docstrings["SequenceClassification"] elif "QuestionAnswering" in model_class: code_sample = sample_docstrings["QuestionAnswering"] elif "TokenClassification" in model_class: code_sample = sample_docstrings["TokenClassification"] elif "MultipleChoice" in model_class: code_sample = sample_docstrings["MultipleChoice"] elif "MaskedLM" in model_class or model_class in ["FlaubertWithLMHeadModel", "XLMWithLMHeadModel"]: code_sample = sample_docstrings["MaskedLM"] elif "LMHead" in model_class or "CausalLM" in model_class: code_sample = sample_docstrings["LMHead"] elif "CTC" in model_class: code_sample = sample_docstrings["CTC"] elif "AudioFrameClassification" in model_class: code_sample = sample_docstrings["AudioFrameClassification"] elif "XVector" in model_class and modality == "audio": code_sample = sample_docstrings["AudioXVector"] elif "Model" in model_class and modality == "audio": code_sample = sample_docstrings["SpeechBaseModel"] elif "Model" in model_class and modality == "vision": code_sample = sample_docstrings["VisionBaseModel"] elif "Model" in model_class or "Encoder" in model_class: code_sample = sample_docstrings["BaseModel"] elif "ImageClassification" in model_class: code_sample = sample_docstrings["ImageClassification"] else: raise ValueError(f"Docstring can't be built for model {model_class}") code_sample = filter_outputs_from_example( code_sample, expected_output=expected_output, expected_loss=expected_loss ) if real_checkpoint is not None: code_sample = FAKE_MODEL_DISCLAIMER + code_sample func_doc = (fn.__doc__ or "") + "".join(docstr) output_doc = "" if output_type is None else _prepare_output_docstrings(output_type, config_class) built_doc = code_sample.format(**doc_kwargs) fn.__doc__ = func_doc + output_doc + built_doc return fn return docstring_decorator def replace_return_docstrings(output_type=None, config_class=None): def docstring_decorator(fn): func_doc = fn.__doc__ lines = func_doc.split("\n") i = 0 while i < len(lines) and re.search(r"^\s*Returns?:\s*$", lines[i]) is None: i += 1 if i < len(lines): indent = len(_get_indent(lines[i])) lines[i] = _prepare_output_docstrings(output_type, config_class, min_indent=indent) func_doc = "\n".join(lines) else: raise ValueError( f"The function {fn} should have an empty 'Return:' or 'Returns:' in its docstring as placeholder, " f"current docstring is:\n{func_doc}" ) fn.__doc__ = func_doc return fn return docstring_decorator def copy_func(f): """Returns a copy of a function f.""" # Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard) g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__, argdefs=f.__defaults__, closure=f.__closure__) g = functools.update_wrapper(g, f) g.__kwdefaults__ = f.__kwdefaults__ return g
# Copyright 2022 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Doc utilities: Utilities related to documentation """ import functools import re import types def add_start_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "") return fn return docstring_decorator def add_start_docstrings_to_model_forward(*docstr): def docstring_decorator(fn): docstring = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "") class_name = f"[`{fn.__qualname__.split('.')[0]}`]" intro = f" The {class_name} forward method, overrides the `__call__` special method." note = r""" <Tip> Although the recipe for forward pass needs to be defined within this function, one should call the [`Module`] instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them. </Tip> """ fn.__doc__ = intro + note + docstring return fn return docstring_decorator def add_end_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = (fn.__doc__ if fn.__doc__ is not None else "") + "".join(docstr) return fn return docstring_decorator PT_RETURN_INTRODUCTION = r""" Returns: [`{full_output_type}`] or `tuple(torch.FloatTensor)`: A [`{full_output_type}`] or a tuple of `torch.FloatTensor` (if `return_dict=False` is passed or when `config.return_dict=False`) comprising various elements depending on the configuration ([`{config_class}`]) and inputs. """ TF_RETURN_INTRODUCTION = r""" Returns: [`{full_output_type}`] or `tuple(tf.Tensor)`: A [`{full_output_type}`] or a tuple of `tf.Tensor` (if `return_dict=False` is passed or when `config.return_dict=False`) comprising various elements depending on the configuration ([`{config_class}`]) and inputs. """ def _get_indent(t): """Returns the indentation in the first line of t""" search = re.search(r"^(\s*)\S", t) return "" if search is None else search.groups()[0] def _convert_output_args_doc(output_args_doc): """Convert output_args_doc to display properly.""" # Split output_arg_doc in blocks argument/description indent = _get_indent(output_args_doc) blocks = [] current_block = "" for line in output_args_doc.split("\n"): # If the indent is the same as the beginning, the line is the name of new arg. if _get_indent(line) == indent: if len(current_block) > 0: blocks.append(current_block[:-1]) current_block = f"{line}\n" else: # Otherwise it's part of the description of the current arg. # We need to remove 2 spaces to the indentation. current_block += f"{line[2:]}\n" blocks.append(current_block[:-1]) # Format each block for proper rendering for i in range(len(blocks)): blocks[i] = re.sub(r"^(\s+)(\S+)(\s+)", r"\1- **\2**\3", blocks[i]) blocks[i] = re.sub(r":\s*\n\s*(\S)", r" -- \1", blocks[i]) return "\n".join(blocks) def _prepare_output_docstrings(output_type, config_class, min_indent=None): """ Prepares the return part of the docstring using `output_type`. 
""" output_docstring = output_type.__doc__ # Remove the head of the docstring to keep the list of args only lines = output_docstring.split("\n") i = 0 while i < len(lines) and re.search(r"^\s*(Args|Parameters):\s*$", lines[i]) is None: i += 1 if i < len(lines): params_docstring = "\n".join(lines[(i + 1) :]) params_docstring = _convert_output_args_doc(params_docstring) # Add the return introduction full_output_type = f"{output_type.__module__}.{output_type.__name__}" intro = TF_RETURN_INTRODUCTION if output_type.__name__.startswith("TF") else PT_RETURN_INTRODUCTION intro = intro.format(full_output_type=full_output_type, config_class=config_class) result = intro + params_docstring # Apply minimum indent if necessary if min_indent is not None: lines = result.split("\n") # Find the indent of the first nonempty line i = 0 while len(lines[i]) == 0: i += 1 indent = len(_get_indent(lines[i])) # If too small, add indentation to all nonempty lines if indent < min_indent: to_add = " " * (min_indent - indent) lines = [(f"{to_add}{line}" if len(line) > 0 else line) for line in lines] result = "\n".join(lines) return result FAKE_MODEL_DISCLAIMER = """ <Tip warning={true}> This example uses a random model as the real ones are all very big. To get proper results, you should use {real_checkpoint} instead of {fake_checkpoint}. If you get out-of-memory when loading that checkpoint, you can try adding `device_map="auto"` in the `from_pretrained` call. </Tip> """ PT_TOKEN_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer( ... "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="pt" ... ) >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_token_class_ids = logits.argmax(-1) >>> # Note that tokens are classified rather then input words which means that >>> # there might be more predicted token classes than words. >>> # Multiple token classes might account for the same word >>> predicted_tokens_classes = [model.config.id2label[t.item()] for t in predicted_token_class_ids[0]] >>> predicted_tokens_classes {expected_output} >>> labels = predicted_token_class_ids >>> loss = model(**inputs, labels=labels).loss >>> round(loss.item(), 2) {expected_loss} ``` """ PT_QUESTION_ANSWERING_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> inputs = tokenizer(question, text, return_tensors="pt") >>> with torch.no_grad(): ... 
outputs = model(**inputs) >>> answer_start_index = outputs.start_logits.argmax() >>> answer_end_index = outputs.end_logits.argmax() >>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1] >>> tokenizer.decode(predict_answer_tokens, skip_special_tokens=True) {expected_output} >>> # target is "nice puppet" >>> target_start_index = torch.tensor([{qa_target_start_index}]) >>> target_end_index = torch.tensor([{qa_target_end_index}]) >>> outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index) >>> loss = outputs.loss >>> round(loss.item(), 2) {expected_loss} ``` """ PT_SEQUENCE_CLASSIFICATION_SAMPLE = r""" Example of single-label classification: ```python >>> import torch >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_class_id = logits.argmax().item() >>> model.config.id2label[predicted_class_id] {expected_output} >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)` >>> num_labels = len(model.config.id2label) >>> model = {model_class}.from_pretrained("{checkpoint}", num_labels=num_labels) >>> labels = torch.tensor([1]) >>> loss = model(**inputs, labels=labels).loss >>> round(loss.item(), 2) {expected_loss} ``` Example of multi-label classification: ```python >>> import torch >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}", problem_type="multi_label_classification") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_class_ids = torch.arange(0, logits.shape[-1])[torch.sigmoid(logits).squeeze(dim=0) > 0.5] >>> # To train a model on `num_labels` classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)` >>> num_labels = len(model.config.id2label) >>> model = {model_class}.from_pretrained( ... "{checkpoint}", num_labels=num_labels, problem_type="multi_label_classification" ... ) >>> labels = torch.sum( ... torch.nn.functional.one_hot(predicted_class_ids[None, :].clone(), num_classes=num_labels), dim=1 ... ).to(torch.float) >>> loss = model(**inputs, labels=labels).loss ``` """ PT_MASKED_LM_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="pt") >>> with torch.no_grad(): ... 
logits = model(**inputs).logits >>> # retrieve index of {mask} >>> mask_token_index = (inputs.input_ids == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0] >>> predicted_token_id = logits[0, mask_token_index].argmax(axis=-1) >>> tokenizer.decode(predicted_token_id) {expected_output} >>> labels = tokenizer("The capital of France is Paris.", return_tensors="pt")["input_ids"] >>> # mask labels of non-{mask} tokens >>> labels = torch.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100) >>> outputs = model(**inputs, labels=labels) >>> round(outputs.loss.item(), 2) {expected_loss} ``` """ PT_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ``` """ PT_MULTIPLE_CHOICE_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> choice0 = "It is eaten with a fork and a knife." >>> choice1 = "It is eaten while held in the hand." >>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 >>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="pt", padding=True) >>> outputs = model(**{{k: v.unsqueeze(0) for k, v in encoding.items()}}, labels=labels) # batch size is 1 >>> # the linear classifier still needs to be trained >>> loss = outputs.loss >>> logits = outputs.logits ``` """ PT_CAUSAL_LM_SAMPLE = r""" Example: ```python >>> import torch >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs, labels=inputs["input_ids"]) >>> loss = outputs.loss >>> logits = outputs.logits ``` """ PT_SPEECH_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoProcessor, {model_class} >>> import torch >>> from datasets import load_dataset >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> processor = AutoProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") >>> with torch.no_grad(): ... 
outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) {expected_output} ``` """ PT_SPEECH_CTC_SAMPLE = r""" Example: ```python >>> from transformers import AutoProcessor, {model_class} >>> from datasets import load_dataset >>> import torch >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> processor = AutoProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_ids = torch.argmax(logits, dim=-1) >>> # transcribe speech >>> transcription = processor.batch_decode(predicted_ids) >>> transcription[0] {expected_output} >>> inputs["labels"] = processor(text=dataset[0]["text"], return_tensors="pt").input_ids >>> # compute loss >>> loss = model(**inputs).loss >>> round(loss.item(), 2) {expected_loss} ``` """ PT_SPEECH_SEQ_CLASS_SAMPLE = r""" Example: ```python >>> from transformers import AutoFeatureExtractor, {model_class} >>> from datasets import load_dataset >>> import torch >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = feature_extractor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="pt") >>> with torch.no_grad(): ... logits = model(**inputs).logits >>> predicted_class_ids = torch.argmax(logits, dim=-1).item() >>> predicted_label = model.config.id2label[predicted_class_ids] >>> predicted_label {expected_output} >>> # compute loss - target_label is e.g. "down" >>> target_label = model.config.id2label[0] >>> inputs["labels"] = torch.tensor([model.config.label2id[target_label]]) >>> loss = model(**inputs).loss >>> round(loss.item(), 2) {expected_loss} ``` """ PT_SPEECH_FRAME_CLASS_SAMPLE = r""" Example: ```python >>> from transformers import AutoFeatureExtractor, {model_class} >>> from datasets import load_dataset >>> import torch >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = feature_extractor(dataset[0]["audio"]["array"], return_tensors="pt", sampling_rate=sampling_rate) >>> with torch.no_grad(): ... 
logits = model(**inputs).logits >>> probabilities = torch.sigmoid(logits[0]) >>> # labels is a one-hot array of shape (num_frames, num_speakers) >>> labels = (probabilities > 0.5).long() >>> labels[0].tolist() {expected_output} ``` """ PT_SPEECH_XVECTOR_SAMPLE = r""" Example: ```python >>> from transformers import AutoFeatureExtractor, {model_class} >>> from datasets import load_dataset >>> import torch >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> feature_extractor = AutoFeatureExtractor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = feature_extractor( ... [d["array"] for d in dataset[:2]["audio"]], sampling_rate=sampling_rate, return_tensors="pt", padding=True ... ) >>> with torch.no_grad(): ... embeddings = model(**inputs).embeddings >>> embeddings = torch.nn.functional.normalize(embeddings, dim=-1).cpu() >>> # the resulting embeddings can be used for cosine similarity-based retrieval >>> cosine_sim = torch.nn.CosineSimilarity(dim=-1) >>> similarity = cosine_sim(embeddings[0], embeddings[1]) >>> threshold = 0.7 # the optimal threshold is dataset-dependent >>> if similarity < threshold: ... print("Speakers are not the same!") >>> round(similarity.item(), 2) {expected_output} ``` """ PT_VISION_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoImageProcessor, {model_class} >>> import torch >>> from datasets import load_dataset >>> dataset = load_dataset("huggingface/cats-image") >>> image = dataset["test"]["image"][0] >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = image_processor(image, return_tensors="pt") >>> with torch.no_grad(): ... outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) {expected_output} ``` """ PT_VISION_SEQ_CLASS_SAMPLE = r""" Example: ```python >>> from transformers import AutoImageProcessor, {model_class} >>> import torch >>> from datasets import load_dataset >>> dataset = load_dataset("huggingface/cats-image") >>> image = dataset["test"]["image"][0] >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = image_processor(image, return_tensors="pt") >>> with torch.no_grad(): ... 
logits = model(**inputs).logits >>> # model predicts one of the 1000 ImageNet classes >>> predicted_label = logits.argmax(-1).item() >>> print(model.config.id2label[predicted_label]) {expected_output} ``` """ PT_SAMPLE_DOCSTRINGS = { "SequenceClassification": PT_SEQUENCE_CLASSIFICATION_SAMPLE, "QuestionAnswering": PT_QUESTION_ANSWERING_SAMPLE, "TokenClassification": PT_TOKEN_CLASSIFICATION_SAMPLE, "MultipleChoice": PT_MULTIPLE_CHOICE_SAMPLE, "MaskedLM": PT_MASKED_LM_SAMPLE, "LMHead": PT_CAUSAL_LM_SAMPLE, "BaseModel": PT_BASE_MODEL_SAMPLE, "SpeechBaseModel": PT_SPEECH_BASE_MODEL_SAMPLE, "CTC": PT_SPEECH_CTC_SAMPLE, "AudioClassification": PT_SPEECH_SEQ_CLASS_SAMPLE, "AudioFrameClassification": PT_SPEECH_FRAME_CLASS_SAMPLE, "AudioXVector": PT_SPEECH_XVECTOR_SAMPLE, "VisionBaseModel": PT_VISION_BASE_MODEL_SAMPLE, "ImageClassification": PT_VISION_SEQ_CLASS_SAMPLE, } TF_TOKEN_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer( ... "HuggingFace is a company based in Paris and New York", add_special_tokens=False, return_tensors="tf" ... ) >>> logits = model(**inputs).logits >>> predicted_token_class_ids = tf.math.argmax(logits, axis=-1) >>> # Note that tokens are classified rather then input words which means that >>> # there might be more predicted token classes than words. >>> # Multiple token classes might account for the same word >>> predicted_tokens_classes = [model.config.id2label[t] for t in predicted_token_class_ids[0].numpy().tolist()] >>> predicted_tokens_classes {expected_output} ``` ```python >>> labels = predicted_token_class_ids >>> loss = tf.math.reduce_mean(model(**inputs, labels=labels).loss) >>> round(float(loss), 2) {expected_loss} ``` """ TF_QUESTION_ANSWERING_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> inputs = tokenizer(question, text, return_tensors="tf") >>> outputs = model(**inputs) >>> answer_start_index = int(tf.math.argmax(outputs.start_logits, axis=-1)[0]) >>> answer_end_index = int(tf.math.argmax(outputs.end_logits, axis=-1)[0]) >>> predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1] >>> tokenizer.decode(predict_answer_tokens) {expected_output} ``` ```python >>> # target is "nice puppet" >>> target_start_index = tf.constant([{qa_target_start_index}]) >>> target_end_index = tf.constant([{qa_target_end_index}]) >>> outputs = model(**inputs, start_positions=target_start_index, end_positions=target_end_index) >>> loss = tf.math.reduce_mean(outputs.loss) >>> round(float(loss), 2) {expected_loss} ``` """ TF_SEQUENCE_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") >>> logits = model(**inputs).logits >>> predicted_class_id = int(tf.math.argmax(logits, axis=-1)[0]) >>> model.config.id2label[predicted_class_id] {expected_output} ``` ```python >>> # To train a model on `num_labels` 
classes, you can pass `num_labels=num_labels` to `.from_pretrained(...)` >>> num_labels = len(model.config.id2label) >>> model = {model_class}.from_pretrained("{checkpoint}", num_labels=num_labels) >>> labels = tf.constant(1) >>> loss = model(**inputs, labels=labels).loss >>> round(float(loss), 2) {expected_loss} ``` """ TF_MASKED_LM_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="tf") >>> logits = model(**inputs).logits >>> # retrieve index of {mask} >>> mask_token_index = tf.where((inputs.input_ids == tokenizer.mask_token_id)[0]) >>> selected_logits = tf.gather_nd(logits[0], indices=mask_token_index) >>> predicted_token_id = tf.math.argmax(selected_logits, axis=-1) >>> tokenizer.decode(predicted_token_id) {expected_output} ``` ```python >>> labels = tokenizer("The capital of France is Paris.", return_tensors="tf")["input_ids"] >>> # mask labels of non-{mask} tokens >>> labels = tf.where(inputs.input_ids == tokenizer.mask_token_id, labels, -100) >>> outputs = model(**inputs, labels=labels) >>> round(float(outputs.loss), 2) {expected_loss} ``` """ TF_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") >>> outputs = model(inputs) >>> last_hidden_states = outputs.last_hidden_state ``` """ TF_MULTIPLE_CHOICE_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> choice0 = "It is eaten with a fork and a knife." >>> choice1 = "It is eaten while held in the hand." 
>>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="tf", padding=True) >>> inputs = {{k: tf.expand_dims(v, 0) for k, v in encoding.items()}} >>> outputs = model(inputs) # batch size is 1 >>> # the linear classifier still needs to be trained >>> logits = outputs.logits ``` """ TF_CAUSAL_LM_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> import tensorflow as tf >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") >>> outputs = model(inputs) >>> logits = outputs.logits ``` """ TF_SPEECH_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoProcessor, {model_class} >>> from datasets import load_dataset >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> processor = AutoProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="tf") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) {expected_output} ``` """ TF_SPEECH_CTC_SAMPLE = r""" Example: ```python >>> from transformers import AutoProcessor, {model_class} >>> from datasets import load_dataset >>> import tensorflow as tf >>> dataset = load_dataset("hf-internal-testing/librispeech_asr_demo", "clean", split="validation") >>> dataset = dataset.sort("id") >>> sampling_rate = dataset.features["audio"].sampling_rate >>> processor = AutoProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> # audio file is decoded on the fly >>> inputs = processor(dataset[0]["audio"]["array"], sampling_rate=sampling_rate, return_tensors="tf") >>> logits = model(**inputs).logits >>> predicted_ids = tf.math.argmax(logits, axis=-1) >>> # transcribe speech >>> transcription = processor.batch_decode(predicted_ids) >>> transcription[0] {expected_output} ``` ```python >>> inputs["labels"] = processor(text=dataset[0]["text"], return_tensors="tf").input_ids >>> # compute loss >>> loss = model(**inputs).loss >>> round(float(loss), 2) {expected_loss} ``` """ TF_VISION_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoImageProcessor, {model_class} >>> from datasets import load_dataset >>> dataset = load_dataset("huggingface/cats-image") >>> image = dataset["test"]["image"][0] >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = image_processor(image, return_tensors="tf") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state >>> list(last_hidden_states.shape) {expected_output} ``` """ TF_VISION_SEQ_CLASS_SAMPLE = r""" Example: ```python >>> from transformers import AutoImageProcessor, {model_class} >>> import tensorflow as tf >>> from datasets import load_dataset >>> dataset = load_dataset("huggingface/cats-image") >>> image = dataset["test"]["image"][0] >>> image_processor = AutoImageProcessor.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = image_processor(image, return_tensors="tf") >>> logits = model(**inputs).logits 
>>> # model predicts one of the 1000 ImageNet classes >>> predicted_label = int(tf.math.argmax(logits, axis=-1)) >>> print(model.config.id2label[predicted_label]) {expected_output} ``` """ TF_SAMPLE_DOCSTRINGS = { "SequenceClassification": TF_SEQUENCE_CLASSIFICATION_SAMPLE, "QuestionAnswering": TF_QUESTION_ANSWERING_SAMPLE, "TokenClassification": TF_TOKEN_CLASSIFICATION_SAMPLE, "MultipleChoice": TF_MULTIPLE_CHOICE_SAMPLE, "MaskedLM": TF_MASKED_LM_SAMPLE, "LMHead": TF_CAUSAL_LM_SAMPLE, "BaseModel": TF_BASE_MODEL_SAMPLE, "SpeechBaseModel": TF_SPEECH_BASE_MODEL_SAMPLE, "CTC": TF_SPEECH_CTC_SAMPLE, "VisionBaseModel": TF_VISION_BASE_MODEL_SAMPLE, "ImageClassification": TF_VISION_SEQ_CLASS_SAMPLE, } FLAX_TOKEN_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") >>> outputs = model(**inputs) >>> logits = outputs.logits ``` """ FLAX_QUESTION_ANSWERING_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> inputs = tokenizer(question, text, return_tensors="jax") >>> outputs = model(**inputs) >>> start_scores = outputs.start_logits >>> end_scores = outputs.end_logits ``` """ FLAX_SEQUENCE_CLASSIFICATION_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") >>> outputs = model(**inputs) >>> logits = outputs.logits ``` """ FLAX_MASKED_LM_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("The capital of France is {mask}.", return_tensors="jax") >>> outputs = model(**inputs) >>> logits = outputs.logits ``` """ FLAX_BASE_MODEL_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="jax") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state ``` """ FLAX_MULTIPLE_CHOICE_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> choice0 = "It is eaten with a fork and a knife." >>> choice1 = "It is eaten while held in the hand." 
>>> encoding = tokenizer([prompt, prompt], [choice0, choice1], return_tensors="jax", padding=True) >>> outputs = model(**{{k: v[None, :] for k, v in encoding.items()}}) >>> logits = outputs.logits ``` """ FLAX_CAUSAL_LM_SAMPLE = r""" Example: ```python >>> from transformers import AutoTokenizer, {model_class} >>> tokenizer = AutoTokenizer.from_pretrained("{checkpoint}") >>> model = {model_class}.from_pretrained("{checkpoint}") >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="np") >>> outputs = model(**inputs) >>> # retrieve logts for next token >>> next_token_logits = outputs.logits[:, -1] ``` """ FLAX_SAMPLE_DOCSTRINGS = { "SequenceClassification": FLAX_SEQUENCE_CLASSIFICATION_SAMPLE, "QuestionAnswering": FLAX_QUESTION_ANSWERING_SAMPLE, "TokenClassification": FLAX_TOKEN_CLASSIFICATION_SAMPLE, "MultipleChoice": FLAX_MULTIPLE_CHOICE_SAMPLE, "MaskedLM": FLAX_MASKED_LM_SAMPLE, "BaseModel": FLAX_BASE_MODEL_SAMPLE, "LMHead": FLAX_CAUSAL_LM_SAMPLE, } def filter_outputs_from_example(docstring, **kwargs): """ Removes the lines testing an output with the doctest syntax in a code sample when it's set to `None`. """ for key, value in kwargs.items(): if value is not None: continue doc_key = "{" + key + "}" docstring = re.sub(rf"\n([^\n]+)\n\s+{doc_key}\n", "\n", docstring) return docstring def add_code_sample_docstrings( *docstr, processor_class=None, checkpoint=None, output_type=None, config_class=None, mask="[MASK]", qa_target_start_index=14, qa_target_end_index=15, model_cls=None, modality=None, expected_output=None, expected_loss=None, real_checkpoint=None, ): def docstring_decorator(fn): # model_class defaults to function's class if not specified otherwise model_class = fn.__qualname__.split(".")[0] if model_cls is None else model_cls if model_class[:2] == "TF": sample_docstrings = TF_SAMPLE_DOCSTRINGS elif model_class[:4] == "Flax": sample_docstrings = FLAX_SAMPLE_DOCSTRINGS else: sample_docstrings = PT_SAMPLE_DOCSTRINGS # putting all kwargs for docstrings in a dict to be used # with the `.format(**doc_kwargs)`. Note that string might # be formatted with non-existing keys, which is fine. doc_kwargs = { "model_class": model_class, "processor_class": processor_class, "checkpoint": checkpoint, "mask": mask, "qa_target_start_index": qa_target_start_index, "qa_target_end_index": qa_target_end_index, "expected_output": expected_output, "expected_loss": expected_loss, "real_checkpoint": real_checkpoint, "fake_checkpoint": checkpoint, "true": "{true}", # For <Tip warning={true}> syntax that conflicts with formatting. 
} if ("SequenceClassification" in model_class or "AudioClassification" in model_class) and modality == "audio": code_sample = sample_docstrings["AudioClassification"] elif "SequenceClassification" in model_class: code_sample = sample_docstrings["SequenceClassification"] elif "QuestionAnswering" in model_class: code_sample = sample_docstrings["QuestionAnswering"] elif "TokenClassification" in model_class: code_sample = sample_docstrings["TokenClassification"] elif "MultipleChoice" in model_class: code_sample = sample_docstrings["MultipleChoice"] elif "MaskedLM" in model_class or model_class in ["FlaubertWithLMHeadModel", "XLMWithLMHeadModel"]: code_sample = sample_docstrings["MaskedLM"] elif "LMHead" in model_class or "CausalLM" in model_class: code_sample = sample_docstrings["LMHead"] elif "CTC" in model_class: code_sample = sample_docstrings["CTC"] elif "AudioFrameClassification" in model_class: code_sample = sample_docstrings["AudioFrameClassification"] elif "XVector" in model_class and modality == "audio": code_sample = sample_docstrings["AudioXVector"] elif "Model" in model_class and modality == "audio": code_sample = sample_docstrings["SpeechBaseModel"] elif "Model" in model_class and modality == "vision": code_sample = sample_docstrings["VisionBaseModel"] elif "Model" in model_class or "Encoder" in model_class: code_sample = sample_docstrings["BaseModel"] elif "ImageClassification" in model_class: code_sample = sample_docstrings["ImageClassification"] else: raise ValueError(f"Docstring can't be built for model {model_class}") code_sample = filter_outputs_from_example( code_sample, expected_output=expected_output, expected_loss=expected_loss ) if real_checkpoint is not None: code_sample = FAKE_MODEL_DISCLAIMER + code_sample func_doc = (fn.__doc__ or "") + "".join(docstr) output_doc = "" if output_type is None else _prepare_output_docstrings(output_type, config_class) built_doc = code_sample.format(**doc_kwargs) fn.__doc__ = func_doc + output_doc + built_doc return fn return docstring_decorator def replace_return_docstrings(output_type=None, config_class=None): def docstring_decorator(fn): func_doc = fn.__doc__ lines = func_doc.split("\n") i = 0 while i < len(lines) and re.search(r"^\s*Returns?:\s*$", lines[i]) is None: i += 1 if i < len(lines): indent = len(_get_indent(lines[i])) lines[i] = _prepare_output_docstrings(output_type, config_class, min_indent=indent) func_doc = "\n".join(lines) else: raise ValueError( f"The function {fn} should have an empty 'Return:' or 'Returns:' in its docstring as placeholder, " f"current docstring is:\n{func_doc}" ) fn.__doc__ = func_doc return fn return docstring_decorator def copy_func(f): """Returns a copy of a function f.""" # Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard) g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__, argdefs=f.__defaults__, closure=f.__closure__) g = functools.update_wrapper(g, f) g.__kwdefaults__ = f.__kwdefaults__ return g
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/segformer/test_modeling_segformer.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch SegFormer model. """ import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerFeatureExtractor class SegformerConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "hidden_sizes")) self.parent.assertTrue(hasattr(config, "num_attention_heads")) self.parent.assertTrue(hasattr(config, "num_encoder_blocks")) class SegformerModelTester: def __init__( self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128], downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], is_training=True, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.num_encoder_blocks = num_encoder_blocks self.sr_ratios = sr_ratios self.depths = depths self.hidden_sizes = hidden_sizes self.downsampling_rates = downsampling_rates self.num_attention_heads = num_attention_heads self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return SegformerConfig( image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, 
attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels): model = SegformerModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def create_and_check_for_image_segmentation(self, config, pixel_values, labels): config.num_labels = self.num_labels model = SegformerForSemanticSegmentation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) result = model(pixel_values, labels=labels) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss, 0.0) def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels): config.num_labels = 1 model = SegformerForSemanticSegmentation(config=config) model.to(torch_device) model.eval() labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device) result = model(pixel_values, labels=labels) self.parent.assertGreater(result.loss, 0.0) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": SegformerModel, "image-classification": SegformerForImageClassification, "image-segmentation": SegformerForSemanticSegmentation, } if is_torch_available() else {} ) fx_compatible = True test_head_masking = False test_pruning = False test_resize_embeddings = False def setUp(self): self.model_tester = SegformerModelTester(self) self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_binary_image_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs) def test_for_image_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs) @unittest.skip("SegFormer does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods") def test_model_common_attributes(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] 
expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions expected_num_attentions = sum(self.model_tester.depths) self.assertEqual(len(attentions), expected_num_attentions) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) # verify the first attentions (first block, first layer) expected_seq_len = (self.model_tester.image_size // 4) ** 2 expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], ) # verify the last attentions (last block, last layer) expected_seq_len = (self.model_tester.image_size // 32) ** 2 expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:]), [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), expected_num_attentions) # verify the first attentions (first block, first layer) expected_seq_len = (self.model_tester.image_size // 4) ** 2 expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = self.model_tester.num_encoder_blocks self.assertEqual(len(hidden_states), expected_num_layers) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:]), [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del 
inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_training(self): if not self.model_tester.is_training: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: if model_class in get_values(MODEL_MAPPING): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() @slow def test_model_from_pretrained(self): for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = SegformerModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch class SegformerModelIntegrationTest(unittest.TestCase): @slow def test_inference_image_segmentation_ade(self): # only resize + normalize feature_extractor = SegformerFeatureExtractor( image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False ) model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to( torch_device ) image = prepare_img() encoded_inputs = feature_extractor(images=image, return_tensors="pt") pixel_values = encoded_inputs.pixel_values.to(torch_device) with torch.no_grad(): outputs = model(pixel_values) expected_shape = torch.Size((1, model.config.num_labels, 128, 128)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [ [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4)) @slow def test_inference_image_segmentation_city(self): # only resize + normalize feature_extractor = SegformerFeatureExtractor( image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False ) model = SegformerForSemanticSegmentation.from_pretrained( "nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(torch_device) image = prepare_img() encoded_inputs = feature_extractor(images=image, return_tensors="pt") pixel_values = encoded_inputs.pixel_values.to(torch_device) with torch.no_grad(): outputs = model(pixel_values) expected_shape = torch.Size((1, model.config.num_labels, 128, 128)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]], ] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1)) @slow def test_post_processing_semantic_segmentation(self): # only resize + normalize feature_extractor = SegformerFeatureExtractor( image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False ) model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to( torch_device ) image = prepare_img() encoded_inputs 
= feature_extractor(images=image, return_tensors="pt") pixel_values = encoded_inputs.pixel_values.to(torch_device) with torch.no_grad(): outputs = model(pixel_values) outputs.logits = outputs.logits.detach().cpu() segmentation = feature_extractor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)]) expected_shape = torch.Size((500, 300)) self.assertEqual(segmentation[0].shape, expected_shape) segmentation = feature_extractor.post_process_semantic_segmentation(outputs=outputs) expected_shape = torch.Size((128, 128)) self.assertEqual(segmentation[0].shape, expected_shape)
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch SegFormer model. """ import inspect import unittest from transformers import SegformerConfig, is_torch_available, is_vision_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_MAPPING, SegformerForImageClassification, SegformerForSemanticSegmentation, SegformerModel, ) from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import SegformerFeatureExtractor class SegformerConfigTester(ConfigTester): def create_and_test_config_common_properties(self): config = self.config_class(**self.inputs_dict) self.parent.assertTrue(hasattr(config, "hidden_sizes")) self.parent.assertTrue(hasattr(config, "num_attention_heads")) self.parent.assertTrue(hasattr(config, "num_encoder_blocks")) class SegformerModelTester: def __init__( self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128], downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], is_training=True, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.num_channels = num_channels self.num_encoder_blocks = num_encoder_blocks self.sr_ratios = sr_ratios self.depths = depths self.hidden_sizes = hidden_sizes self.downsampling_rates = downsampling_rates self.num_attention_heads = num_attention_heads self.is_training = is_training self.use_labels = use_labels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.initializer_range = initializer_range self.num_labels = num_labels self.scope = scope def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels) config = self.get_config() return config, pixel_values, labels def get_config(self): return SegformerConfig( image_size=self.image_size, num_channels=self.num_channels, num_encoder_blocks=self.num_encoder_blocks, depths=self.depths, hidden_sizes=self.hidden_sizes, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, 
attention_probs_dropout_prob=self.attention_probs_dropout_prob, initializer_range=self.initializer_range, ) def create_and_check_model(self, config, pixel_values, labels): model = SegformerModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) ) def create_and_check_for_image_segmentation(self, config, pixel_values, labels): config.num_labels = self.num_labels model = SegformerForSemanticSegmentation(config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) result = model(pixel_values, labels=labels) self.parent.assertEqual( result.logits.shape, (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) ) self.parent.assertGreater(result.loss, 0.0) def create_and_check_for_binary_image_segmentation(self, config, pixel_values, labels): config.num_labels = 1 model = SegformerForSemanticSegmentation(config=config) model.to(torch_device) model.eval() labels = torch.randint(0, 1, (self.batch_size, self.image_size, self.image_size)).to(torch_device) result = model(pixel_values, labels=labels) self.parent.assertGreater(result.loss, 0.0) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = ( ( SegformerModel, SegformerForSemanticSegmentation, SegformerForImageClassification, ) if is_torch_available() else () ) pipeline_model_mapping = ( { "feature-extraction": SegformerModel, "image-classification": SegformerForImageClassification, "image-segmentation": SegformerForSemanticSegmentation, } if is_torch_available() else {} ) fx_compatible = True test_head_masking = False test_pruning = False test_resize_embeddings = False def setUp(self): self.model_tester = SegformerModelTester(self) self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_binary_image_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_binary_image_segmentation(*config_and_inputs) def test_for_image_segmentation(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_segmentation(*config_and_inputs) @unittest.skip("SegFormer does not use inputs_embeds") def test_inputs_embeds(self): pass @unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods") def test_model_common_attributes(self): pass def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] 
expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions expected_num_attentions = sum(self.model_tester.depths) self.assertEqual(len(attentions), expected_num_attentions) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.attentions self.assertEqual(len(attentions), expected_num_attentions) # verify the first attentions (first block, first layer) expected_seq_len = (self.model_tester.image_size // 4) ** 2 expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], ) # verify the last attentions (last block, last layer) expected_seq_len = (self.model_tester.image_size // 32) ** 2 expected_reduced_seq_len = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2 self.assertListEqual( list(attentions[-1].shape[-3:]), [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len], ) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) self.assertEqual(out_len + 1, len(outputs)) self_attentions = outputs.attentions self.assertEqual(len(self_attentions), expected_num_attentions) # verify the first attentions (first block, first layer) expected_seq_len = (self.model_tester.image_size // 4) ** 2 expected_reduced_seq_len = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2 self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len], ) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.hidden_states expected_num_layers = self.model_tester.num_encoder_blocks self.assertEqual(len(hidden_states), expected_num_layers) # verify the first hidden states (first block) self.assertListEqual( list(hidden_states[0].shape[-3:]), [ self.model_tester.hidden_sizes[0], self.model_tester.image_size // 4, self.model_tester.image_size // 4, ], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del 
inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_training(self): if not self.model_tester.is_training: return config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True for model_class in self.all_model_classes: if model_class in get_values(MODEL_MAPPING): continue model = model_class(config) model.to(torch_device) model.train() inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True) loss = model(**inputs).loss loss.backward() @slow def test_model_from_pretrained(self): for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = SegformerModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch class SegformerModelIntegrationTest(unittest.TestCase): @slow def test_inference_image_segmentation_ade(self): # only resize + normalize feature_extractor = SegformerFeatureExtractor( image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False ) model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to( torch_device ) image = prepare_img() encoded_inputs = feature_extractor(images=image, return_tensors="pt") pixel_values = encoded_inputs.pixel_values.to(torch_device) with torch.no_grad(): outputs = model(pixel_values) expected_shape = torch.Size((1, model.config.num_labels, 128, 128)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [ [[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]], [[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]], [[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]], ] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-4)) @slow def test_inference_image_segmentation_city(self): # only resize + normalize feature_extractor = SegformerFeatureExtractor( image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False ) model = SegformerForSemanticSegmentation.from_pretrained( "nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(torch_device) image = prepare_img() encoded_inputs = feature_extractor(images=image, return_tensors="pt") pixel_values = encoded_inputs.pixel_values.to(torch_device) with torch.no_grad(): outputs = model(pixel_values) expected_shape = torch.Size((1, model.config.num_labels, 128, 128)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [ [[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]], [[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]], [[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]], ] ).to(torch_device) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3], expected_slice, atol=1e-1)) @slow def test_post_processing_semantic_segmentation(self): # only resize + normalize feature_extractor = SegformerFeatureExtractor( image_scale=(512, 512), keep_ratio=False, align=False, do_random_crop=False ) model = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512").to( torch_device ) image = prepare_img() encoded_inputs 
= feature_extractor(images=image, return_tensors="pt") pixel_values = encoded_inputs.pixel_values.to(torch_device) with torch.no_grad(): outputs = model(pixel_values) outputs.logits = outputs.logits.detach().cpu() segmentation = feature_extractor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)]) expected_shape = torch.Size((500, 300)) self.assertEqual(segmentation[0].shape, expected_shape) segmentation = feature_extractor.post_process_semantic_segmentation(outputs=outputs) expected_shape = torch.Size((128, 128)) self.assertEqual(segmentation[0].shape, expected_shape)
-1
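For context on the `//` to `torch.div` change described in the record above, here is a minimal sketch of the replacement pattern; it is not taken from the PR diff, and the tensor values and variable names are illustrative assumptions only.

```python
import torch

# Illustrative tensor; not from the PR.
positions = torch.arange(10)

# Old spelling: triggers the __floordiv__ deprecation warning on recent PyTorch.
# block_ids = positions // 4

# Typical replacement: explicit floor division via torch.div.
block_ids = torch.div(positions, 4, rounding_mode="floor")
print(block_ids)  # tensor([0, 0, 0, 0, 1, 1, 1, 1, 2, 2])
```

The `rounding_mode="floor"` argument keeps the integer floor-division semantics that `//` previously provided on tensors.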
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/dpt/test_image_processing_dpt.py
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class DPTImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ): size = size if size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = DPTImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = DPTImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_call_pil(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], 
self.image_processor_tester.size["width"], ), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) def test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) def test_call_pytorch(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), )
# coding=utf-8 # Copyright 2022 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np from transformers.file_utils import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DPTImageProcessor class DPTImageProcessingTester(unittest.TestCase): def __init__( self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], ): size = size if size is not None else {"height": 18, "width": 18} self.parent = parent self.batch_size = batch_size self.num_channels = num_channels self.image_size = image_size self.min_resolution = min_resolution self.max_resolution = max_resolution self.do_resize = do_resize self.size = size self.do_normalize = do_normalize self.image_mean = image_mean self.image_std = image_std def prepare_image_processor_dict(self): return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, } @require_torch @require_vision class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase): image_processing_class = DPTImageProcessor if is_vision_available() else None def setUp(self): self.image_processor_tester = DPTImageProcessingTester(self) @property def image_processor_dict(self): return self.image_processor_tester.prepare_image_processor_dict() def test_image_processor_properties(self): image_processing = self.image_processing_class(**self.image_processor_dict) self.assertTrue(hasattr(image_processing, "image_mean")) self.assertTrue(hasattr(image_processing, "image_std")) self.assertTrue(hasattr(image_processing, "do_normalize")) self.assertTrue(hasattr(image_processing, "do_resize")) self.assertTrue(hasattr(image_processing, "size")) def test_image_processor_from_dict_with_kwargs(self): image_processor = self.image_processing_class.from_dict(self.image_processor_dict) self.assertEqual(image_processor.size, {"height": 18, "width": 18}) image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42) self.assertEqual(image_processor.size, {"height": 42, "width": 42}) def test_call_pil(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PIL images image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False) for image in image_inputs: self.assertIsInstance(image, Image.Image) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], 
self.image_processor_tester.size["width"], ), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) def test_call_numpy(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random numpy tensors image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True) for image in image_inputs: self.assertIsInstance(image, np.ndarray) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) def test_call_pytorch(self): # Initialize image_processing image_processing = self.image_processing_class(**self.image_processor_dict) # create random PyTorch tensors image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True) for image in image_inputs: self.assertIsInstance(image, torch.Tensor) # Test not batched input encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), ) # Test batched encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values self.assertEqual( encoded_images.shape, ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size["height"], self.image_processor_tester.size["width"], ), )
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/whisper/test_modeling_whisper.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Whisper model. """ import copy import inspect import os import tempfile import unittest import numpy as np import transformers from transformers import WhisperConfig from transformers.testing_utils import is_pt_flax_cross_test, require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_flax_available, is_torch_available from transformers.utils.import_utils import is_datasets_available from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_datasets_available(): import datasets from datasets import load_dataset if is_torch_available(): import torch from transformers import ( WhisperFeatureExtractor, WhisperForConditionalGeneration, WhisperModel, WhisperProcessor, set_seed, ) from transformers.models.whisper.modeling_whisper import WhisperDecoder, WhisperEncoder if is_flax_available(): import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) def prepare_whisper_inputs_dict( config, input_features, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { # "input_ids": input_features, "input_features": input_features, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_torch class WhisperModelTester: def __init__( self, parent, batch_size=13, seq_length=60, is_training=True, use_labels=False, vocab_size=200, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, input_channels=1, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, max_source_positions=30, max_target_positions=40, bos_token_id=98, eos_token_id=98, pad_token_id=0, num_mel_bins=80, decoder_start_token_id=85, num_conv_layers=1, suppress_tokens=None, begin_suppress_tokens=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = 
vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.input_channels = input_channels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_mel_bins = num_mel_bins self.max_position_embeddings = max_position_embeddings self.max_source_positions = max_source_positions self.max_target_positions = max_target_positions self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.decoder_start_token_id = decoder_start_token_id self.num_conv_layers = num_conv_layers self.suppress_tokens = suppress_tokens self.begin_suppress_tokens = begin_suppress_tokens def prepare_config_and_inputs(self): input_features = floats_tensor([self.batch_size, self.num_mel_bins, self.seq_length], self.vocab_size) decoder_input_ids = torch.tensor(self.batch_size * [[self.decoder_start_token_id]], device=torch_device) config = self.get_config() inputs_dict = prepare_whisper_inputs_dict( config, attention_mask=None, input_features=input_features, decoder_input_ids=decoder_input_ids, ) return config, inputs_dict def get_config(self): return WhisperConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, input_channels=self.input_channels, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, max_source_positions=self.max_source_positions, max_target_positions=self.max_target_positions, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_ffn_dim=self.hidden_size, encoder_ffn_dim=self.hidden_size, decoder_start_token_id=self.decoder_start_token_id, suppress_tokens=self.suppress_tokens, begin_suppress_tokens=self.begin_suppress_tokens, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_subsampled_output_lengths(self, input_lengths): """ Computes the output length of the convolutional layers """ for i in range(self.num_conv_layers): input_lengths = (input_lengths - 1) // 2 + 1 return input_lengths def create_and_check_model_forward(self, config, inputs_dict, freeze_encoder=False): model = WhisperModel(config=config).to(torch_device).eval() if freeze_encoder: model.freeze_encoder() input_features = inputs_dict["input_features"] decoder_input_ids = inputs_dict["decoder_input_ids"] # first forward pass last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state self.parent.assertTrue(last_hidden_state.shape, (13, 7, 16)) def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = WhisperModel(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["decoder_input_ids"] attention_mask = inputs_dict["decoder_attention_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size).clamp(2) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = 
torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = WhisperModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = WhisperEncoder.from_pretrained(tmpdirname).to(torch_device) encoder_last_hidden_state_2 = encoder(inputs_dict["input_features"])[0] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = WhisperDecoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class WhisperModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (WhisperModel, WhisperForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (WhisperForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( {"automatic-speech-recognition": WhisperForConditionalGeneration, "feature-extraction": WhisperModel} if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = False test_pruning = False test_missing_keys = False input_name = "input_features" def setUp(self): self.model_tester = WhisperModelTester(self) self.config_tester = ConfigTester(self, config_class=WhisperConfig) self.maxDiff = 3000 def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) def test_model_forward_with_frozen_encoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs, freeze_encoder=True) def test_requires_grad_with_frozen_encoder(self): config = 
self.model_tester.get_config() for model_class in self.all_model_classes: model = model_class(config) model.freeze_encoder() try: encoder_grads = [param.requires_grad for param in model.encoder.parameters()] decoder_grads = [param.requires_grad for param in model.decoder.parameters()] except AttributeError: encoder_grads = [param.requires_grad for param in model.model.encoder.parameters()] decoder_grads = [param.requires_grad for param in model.model.decoder.parameters()] self.assertFalse(all(encoder_grads)) self.assertTrue(all(decoder_grads)) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) def _get_input_ids_and_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict[self.input_name] # cut to half length & take max batch_size 3 max_batch_size = 3 input_ids = input_ids[:max_batch_size, :, :] # generate max 3 tokens max_length = input_ids.shape[-1] + 3 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` config.pad_token_id = config.eos_token_id return config, input_ids, None, max_length # not implemented currently def test_inputs_embeds(self): pass # training is not supported yet def test_training(self): pass def test_training_gradient_checkpointing(self): pass def test_generate_with_head_masking(self): pass def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() config.max_target_positions = 400 input_features = input_dict["input_features"] model = WhisperForConditionalGeneration(config).eval().to(torch_device) if torch_device == "cuda": input_features = input_features.half() model.half() model.generate(input_features) model.generate(input_features, num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_features", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = 
self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length subsampled_seq_length = model._get_feat_extract_output_lengths(seq_length) self.assertListEqual( list(hidden_states[0].shape[-2:]), [subsampled_seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", 1) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", 1) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", 1) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() subsampled_encoder_seq_length = model._get_feat_extract_output_lengths(encoder_seq_length) subsampled_encoder_key_length = model._get_feat_extract_output_lengths(encoder_key_length) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) out_len = len(outputs) correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, 
(list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, subsampled_encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) def test_resize_tokens_embeddings(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.vocab_size # Retrieve the embeddings and clone theme model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) # make sure that decoder_input_ids are resized if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that adding and removing tokens has not modified the first part of the embedding matrix. 
models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_resize_embeddings_untied(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return original_config.tie_word_embeddings = False # if model cannot untied embeddings -> leave test if original_config.tie_word_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) # if no output embeddings -> leave test if model.get_output_embeddings() is None: continue # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_vocab_size = config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) def test_generate_without_input_ids(self): pass @staticmethod def _get_encoder_outputs( model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1 ): encoder = model.get_encoder() encoder_outputs = encoder( input_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave( num_interleave, dim=0 ) input_ids = input_ids[:, :, 0] input_ids = torch.zeros_like(input_ids[:, :1], dtype=torch.long) + torch.tensor( [model._get_decoder_start_token_id()], device=input_ids.device ) attention_mask = None return encoder_outputs, input_ids, attention_mask def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1): batch_size, mel, seq_length = input_ids.shape subsampled_seq_length = self.model_tester.get_subsampled_output_lengths(seq_length) num_sequences_in_output = batch_size * num_return_sequences gen_len = ( output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length ) # scores self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config) # Attentions # encoder self._check_encoder_attention_for_generate( output.encoder_attentions, 
batch_size, config, subsampled_seq_length ) # decoder self._check_attentions_for_generate( num_sequences_in_output, output.decoder_attentions, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) # Hidden States # encoder self._check_encoder_hidden_states_for_generate( output.encoder_hidden_states, batch_size, config, subsampled_seq_length ) # decoder self._check_hidden_states_for_generate( num_sequences_in_output, output.decoder_hidden_states, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) try: model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward input_features = inputs["input_features"] decoder_input_ids = inputs["decoder_input_ids"] decoder_attention_mask = inputs["decoder_attention_mask"] traced_model = torch.jit.trace(model, (input_features, decoder_input_ids, decoder_attention_mask)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() init_shape = (1,) + inputs_dict["input_features"].shape[1:] for model_class in self.all_model_classes: with self.subTest(model_class.__name__): fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): # no flax model exists for this class return # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions fx_model_class = getattr(transformers, fx_model_class_name) # load PyTorch class pt_model = model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. 
pt_model.config.use_cache = False # load Flax class fx_model = fx_model_class(config, input_shape=init_shape, dtype=jnp.float32) # make sure only flax inputs are forward that actually exist in function args fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() # prepare inputs pt_inputs = self._prepare_for_class(inputs_dict, model_class) # remove function args that don't exist in Flax pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} # send pytorch inputs to the correct device pt_inputs = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs.items() } # convert inputs to Flax fx_inputs = {k: np.array(v) for k, v in pt_inputs.items() if torch.is_tensor(v)} fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = fx_model_class.from_pretrained(tmpdirname, input_shape=init_shape, from_pt=True) fx_outputs_loaded = fx_model_loaded(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs_loaded.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs_loaded, pt_outputs, model_class) @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() init_shape = (1,) + inputs_dict["input_features"].shape[1:] for model_class in self.all_model_classes: with self.subTest(model_class.__name__): fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): # no flax model exists for this class return # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions fx_model_class = getattr(transformers, fx_model_class_name) # load PyTorch class pt_model = model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. 
pt_model.config.use_cache = False # load Flax class fx_model = fx_model_class(config, input_shape=init_shape, dtype=jnp.float32) # make sure only flax inputs are forward that actually exist in function args fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() # prepare inputs pt_inputs = self._prepare_for_class(inputs_dict, model_class) # remove function args that don't exist in Flax pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} # send pytorch inputs to the correct device pt_inputs = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs.items() } # convert inputs to Flax fx_inputs = {k: np.array(v) for k, v in pt_inputs.items() if torch.is_tensor(v)} pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) # make sure weights are tied in PyTorch pt_model.tie_weights() # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = model_class.from_pretrained(tmpdirname, from_flax=True) # send pytorch model to the correct device pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs_loaded.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs_loaded, model_class) def test_mask_feature_prob(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.mask_feature_prob = 0.2 config.mask_feature_length = 2 for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.train() # forward pass encoder_last_hidden_state = model(**input_dict).encoder_last_hidden_state self.assertTrue(encoder_last_hidden_state.shape, (13, 30, 16)) def test_mask_time_prob(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.mask_time_prob = 0.2 config.mask_time_length = 2 for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.train() # forward pass encoder_last_hidden_state = model(**input_dict).encoder_last_hidden_state self.assertTrue(encoder_last_hidden_state.shape, (13, 30, 16)) @require_torch @require_torchaudio class WhisperModelIntegrationTests(unittest.TestCase): @cached_property def default_processor(self): return WhisperProcessor.from_pretrained("openai/whisper-base") def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] @slow def test_tiny_logits_librispeech(self): torch_device = "cpu" set_seed(0) model = WhisperModel.from_pretrained("openai/whisper-tiny") model.to(torch_device) input_speech = self._load_datasamples(1) feature_extractor = WhisperFeatureExtractor() input_features = feature_extractor(input_speech, return_tensors="pt").input_features with 
torch.no_grad(): logits = model( input_features, decoder_input_ids=torch.tensor([[50258, 50259, 50359]]), output_hidden_states=False, output_attentions=False, return_dict=False, use_cache=False, ) # fmt: off EXPECTED_LOGITS = torch.tensor( [ 2.9892, -6.7607, 5.7348, 3.6096, 0.2152, -5.7321, 4.8855, -1.6407, 0.2823, -1.5718, 10.4269, 3.4427, 0.0219, -8.0612, 3.4784, 8.4246, 4.0575, -2.2864, 11.1084, 0.9963, 0.9884, -8.5154, -3.5469, -9.3713, 0.9786, 3.5435, 7.4850, -5.2579, -1.4366, 10.4841 ] ) # fmt: on self.assertTrue(torch.allclose(logits[0][0, 0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4)) # fmt: off EXPECTED_GENERATION = torch.tensor( [ -1.4651, -2.6944, 2.7821, 2.3793, 4.0738, 0.0188, -3.3203, 1.9836, 0.0520, 0.7095, 1.1063, 0.2952, -3.6786, -0.5249, 0.3105, 4.7691, 1.1562, 1.3046, 0.5810, -0.3624, 1.7006, 1.3424, 0.9817, 2.1958, 1.8775, -5.7046, -0.7679, 4.0113, 2.6848, 2.8609 ] ) # fmt: on head_logits = logits[0] @ model.decoder.embed_tokens.weight.T self.assertTrue(torch.allclose(head_logits[0, 0, :30].cpu(), EXPECTED_GENERATION, atol=1e-4)) @slow def test_small_en_logits_librispeech(self): set_seed(0) torch_device = "cpu" model = WhisperModel.from_pretrained("openai/whisper-small.en") model.to(torch_device) input_speech = self._load_datasamples(1) feaure_extractor = WhisperFeatureExtractor() input_features = feaure_extractor(input_speech, return_tensors="pt").input_features.to(torch_device) logits = model( input_features, decoder_input_ids=torch.tensor([[model.config.decoder_start_token_id]]), output_hidden_states=False, output_attentions=False, use_cache=False, ) logits = logits.last_hidden_state @ model.decoder.embed_tokens.weight.T # fmt: off EXPECTED_LOGITS = torch.tensor( [ -3.6784, -7.7211, -9.5070, -11.9286, -7.6489, -9.7026, -5.6188, -8.0104, -4.6238, -5.1833, -9.0485, -3.4079, -5.4874, -2.6935, -6.3479, -7.3398, -6.9558, -7.6867, -7.4748, -8.3463, -9.9781, -10.8389, -10.3105, -11.7201, -9.7261, -7.1590, -5.9272, -12.4509, -11.1146, -8.1918 ] ) # fmt: on self.assertTrue(torch.allclose(logits[0, 0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4)) @slow def test_large_logits_librispeech(self): set_seed(0) torch_device = "cpu" model = WhisperModel.from_pretrained("openai/whisper-large") model.to(torch_device) input_speech = self._load_datasamples(1) processor = WhisperProcessor.from_pretrained("openai/whisper-large") processed_inputs = processor( audio=input_speech, text="This part of the speech", add_special_tokens=False, return_tensors="pt" ) input_features = processed_inputs.input_features.to(torch_device) decoder_input_ids = processed_inputs.labels.to(torch_device) logits = model( input_features, decoder_input_ids=decoder_input_ids, output_hidden_states=False, output_attentions=False, use_cache=False, ) logits = logits.last_hidden_state @ model.decoder.embed_tokens.weight.T # fmt: off EXPECTED_LOGITS = torch.tensor( [ 2.1382, 0.9381, 4.4671, 3.5589, 2.4022, 3.8576, -0.6521, 2.5472, 1.8301, 1.9957, 2.3432, 1.4678, 0.5459, 2.2597, 1.5179, 2.5357, 1.1624, 0.6194, 1.0757, 1.8259, 2.4076, 1.6601, 2.3503, 1.3376, 1.9891, 1.8635, 3.8931, 5.3699, 4.4772, 3.9184 ] ) # fmt: on self.assertTrue(torch.allclose(logits[0, 0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4)) @slow def test_tiny_en_generation(self): torch_device = "cpu" set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") model.to(torch_device) model.config.decoder_start_token_id = 50257 input_speech = self._load_datasamples(1) 
input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generated_ids = model.generate(input_features, num_beams=5, max_length=20) transcript = processor.tokenizer.batch_decode(generated_ids)[0] EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|notimestamps|> Mr. Quilter is the apostle of the middle" " classes, and we are glad to" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_generation(self): torch_device = "cpu" set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") model.to(torch_device) input_speech = self._load_datasamples(1) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generated_ids = model.generate(input_features, num_beams=5, max_length=20) transcript = processor.tokenizer.decode(generated_ids[0]) EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> Mr. Quilter is the apostle of the middle" " classes and we are glad" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_large_generation(self): torch_device = "cpu" set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large") model.to(torch_device) input_speech = self._load_datasamples(1) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|en|>", task="transcribe" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " Mr. Quilter is the apostle of the middle classes and we are glad" self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_large_generation_multilingual(self): torch_device = "cpu" set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large") model.to(torch_device) ds = load_dataset("common_voice", "ja", split="test", streaming=True) ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000)) input_speech = next(iter(ds))["audio"]["array"] input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|ja|>", task="transcribe" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = "木村さんに電話を貸してもらいました" self.assertEqual(transcript, EXPECTED_TRANSCRIPT) generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|en|>", task="transcribe" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " Kimura-san called me." 
self.assertEqual(transcript, EXPECTED_TRANSCRIPT) generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|ja|>", task="translate" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " I borrowed a phone from Kimura san" self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_large_batched_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large") input_speech = self._load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features generated_ids = model.generate(input_features, max_length=20, task="translate") # fmt: off EXPECTED_LOGITS = torch.tensor( [ [50258, 50259, 50358, 50363, 2221, 13, 2326, 388, 391, 307, 264, 50244, 295, 264, 2808, 5359, 293, 321, 366, 5404], [50258, 50259, 50358, 50363, 6966, 307, 2221, 13, 2326, 388, 391, 311, 9060, 1570, 1880, 813, 702, 1871, 13, 50257], [50258, 50259, 50358, 50363, 634, 5112, 505, 300, 412, 341, 42729, 3196, 295, 264, 1064, 11, 365, 5272, 293, 12904], [50258, 50259, 50358, 50363, 634, 575, 12525, 22618, 1968, 6144, 35617, 20084, 1756, 311, 589, 307, 534, 10281, 934, 439] ] ) # fmt: on self.assertTrue(torch.allclose(generated_ids, EXPECTED_LOGITS)) # fmt: off EXPECTED_TRANSCRIPT = [ " Mr. Quilter is the apostle of the middle classes and we are glad", " Nor is Mr. Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all", ] # fmt: on transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_en_batched_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") model.to(torch_device) input_speech = self._load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generated_ids = model.generate(input_features, max_length=20).to("cpu") # fmt: off EXPECTED_LOGITS = torch.tensor( [ [50257, 50362, 1770, 13, 2264, 346, 353, 318, 262, 46329, 286, 262, 3504, 6097, 11, 290, 356, 389, 9675, 284], [50257, 50362, 5414, 318, 1770, 13, 2264, 346, 353, 338, 5642, 1342, 3499, 621, 465, 2300, 13, 50256, 50256, 50256], [50257, 50362, 679, 4952, 514, 326, 379, 428, 43856, 1622, 286, 262, 614, 11, 351, 6786, 290, 32595, 12023, 28236], [50257, 50362, 679, 468, 12296, 17188, 1771, 7361, 26113, 18881, 1122, 338, 670, 318, 1107, 8312, 706, 477, 290, 460] ] ) # fmt: on self.assertTrue(torch.allclose(generated_ids, EXPECTED_LOGITS)) # fmt: off EXPECTED_TRANSCRIPT = [ " Mr. Quilter is the apostle of the middle classes, and we are glad to", " Nor is Mr. 
Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast beef looming", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all and can", ] # fmt: on transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_timestamp_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") model.to(torch_device) input_speech = np.concatenate(self._load_datasamples(4)) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generated_ids = model.generate(input_features, max_length=448, return_timestamps=True).to("cpu") # fmt: off EXPECTED_OUTPUT = torch.tensor([50258, 50259, 50359, 50364, 2221, 13, 2326, 388, 391, 307, 264, 50244, 295, 264, 2808, 5359, 11, 293, 321, 366, 5404, 281, 2928, 702, 14943, 13, 50692, 50692, 6966, 307, 2221, 13, 2326, 388, 391, 311, 9060, 1570, 1880, 813, 702, 1871, 13, 50926, 50926, 634, 5112, 505, 300, 412, 341, 42729, 3196, 295, 264, 1064, 11, 365, 5272, 293, 12904, 9256, 450, 10539, 51208, 51208, 949, 505, 11, 14138, 10117, 490, 3936, 293, 1080, 3542, 5160, 881, 26336, 281, 264, 1575, 13, 51552, 51552, 634, 575, 12525, 22618, 1968, 6144, 35617, 7354, 1292, 6, 589, 307, 534, 10281, 934, 439, 11, 293, 51836, 51836, 50257]) # fmt: on self.assertTrue(torch.allclose(generated_ids, EXPECTED_OUTPUT)) EXPECTED_TRANSCRIPT = [ { "text": ( " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel. Nor is" " Mr. Quilter's manner less interesting than his matter. He tells us that at this festive season" " of the year, with Christmas and roast beef looming before us, similarly drawn from eating and" " its results occur most readily to the mind. He has grave doubts whether Sir Frederick Latins'" " work is really Greek after all, and" ), "offsets": [ { "text": ( " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel." ), "timestamp": (0.0, 6.5600000000000005), }, { "text": " Nor is Mr. Quilter's manner less interesting than his matter.", "timestamp": (6.5600000000000005, 11.24), }, { "text": ( " He tells us that at this festive season of the year, with Christmas and roast beef" " looming" ), "timestamp": (11.24, 16.88), }, { "text": ( " before us, similarly drawn from eating and its results occur most readily to the mind." 
), "timestamp": (16.88, 23.76), }, { "text": ( " He has grave doubts whether Sir Frederick Latins' work is really Greek after all, and" ), "timestamp": (23.76, 29.44), }, ], } ] transcript = processor.batch_decode(generated_ids, skip_special_tokens=True, output_offsets=True) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_specaugment_librispeech(self): torch_device = "cpu" set_seed(0) # Apply SpecAugment model = WhisperModel.from_pretrained("openai/whisper-tiny", apply_spec_augment=True) # Set model to training mode to enable SpecAugment model.train() model.to(torch_device) input_speech = self._load_datasamples(1) feature_extractor = WhisperFeatureExtractor() input_features = feature_extractor(input_speech, return_tensors="pt").input_features with torch.no_grad(): logits = model( input_features, decoder_input_ids=torch.tensor([[50258, 50259, 50359]]), output_hidden_states=False, output_attentions=False, return_dict=False, use_cache=False, ) # fmt: off EXPECTED_LOGITS = torch.tensor( [ 0.9362, -4.7105, 5.0879, 3.9642, 1.0013, -6.0096, 4.7285, -3.1847, -0.8648, 1.9631, 6.2653, 3.6936, 0.3575, -4.5818, 3.0564, 7.8712, 2.9951, 0.6848, 9.9497, -2.6638, 1.1571, -6.8546, -1.4333, -7.7584, 1.1200, 3.9030, 4.4655, -4.4919, -1.1703, 9.6241 ] ) # fmt: on self.assertTrue(torch.allclose(logits[0][0, 0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4))
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Whisper model. """ import copy import inspect import os import tempfile import unittest import numpy as np import transformers from transformers import WhisperConfig from transformers.testing_utils import is_pt_flax_cross_test, require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_flax_available, is_torch_available from transformers.utils.import_utils import is_datasets_available from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_datasets_available(): import datasets from datasets import load_dataset if is_torch_available(): import torch from transformers import ( WhisperFeatureExtractor, WhisperForConditionalGeneration, WhisperModel, WhisperProcessor, set_seed, ) from transformers.models.whisper.modeling_whisper import WhisperDecoder, WhisperEncoder if is_flax_available(): import jax.numpy as jnp from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) def prepare_whisper_inputs_dict( config, input_features, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { # "input_ids": input_features, "input_features": input_features, "decoder_input_ids": decoder_input_ids, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_torch class WhisperModelTester: def __init__( self, parent, batch_size=13, seq_length=60, is_training=True, use_labels=False, vocab_size=200, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, input_channels=1, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, max_source_positions=30, max_target_positions=40, bos_token_id=98, eos_token_id=98, pad_token_id=0, num_mel_bins=80, decoder_start_token_id=85, num_conv_layers=1, suppress_tokens=None, begin_suppress_tokens=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = 
vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.input_channels = input_channels self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.num_mel_bins = num_mel_bins self.max_position_embeddings = max_position_embeddings self.max_source_positions = max_source_positions self.max_target_positions = max_target_positions self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id self.decoder_start_token_id = decoder_start_token_id self.num_conv_layers = num_conv_layers self.suppress_tokens = suppress_tokens self.begin_suppress_tokens = begin_suppress_tokens def prepare_config_and_inputs(self): input_features = floats_tensor([self.batch_size, self.num_mel_bins, self.seq_length], self.vocab_size) decoder_input_ids = torch.tensor(self.batch_size * [[self.decoder_start_token_id]], device=torch_device) config = self.get_config() inputs_dict = prepare_whisper_inputs_dict( config, attention_mask=None, input_features=input_features, decoder_input_ids=decoder_input_ids, ) return config, inputs_dict def get_config(self): return WhisperConfig( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, input_channels=self.input_channels, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, max_source_positions=self.max_source_positions, max_target_positions=self.max_target_positions, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_ffn_dim=self.hidden_size, encoder_ffn_dim=self.hidden_size, decoder_start_token_id=self.decoder_start_token_id, suppress_tokens=self.suppress_tokens, begin_suppress_tokens=self.begin_suppress_tokens, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def get_subsampled_output_lengths(self, input_lengths): """ Computes the output length of the convolutional layers """ for i in range(self.num_conv_layers): input_lengths = (input_lengths - 1) // 2 + 1 return input_lengths def create_and_check_model_forward(self, config, inputs_dict, freeze_encoder=False): model = WhisperModel(config=config).to(torch_device).eval() if freeze_encoder: model.freeze_encoder() input_features = inputs_dict["input_features"] decoder_input_ids = inputs_dict["decoder_input_ids"] # first forward pass last_hidden_state = model(input_features, decoder_input_ids=decoder_input_ids).last_hidden_state self.parent.assertTrue(last_hidden_state.shape, (13, 7, 16)) def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = WhisperModel(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["decoder_input_ids"] attention_mask = inputs_dict["decoder_attention_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size).clamp(2) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = 
torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[ "last_hidden_state" ] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)) def check_encoder_decoder_model_standalone(self, config, inputs_dict): model = WhisperModel(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = WhisperEncoder.from_pretrained(tmpdirname).to(torch_device) encoder_last_hidden_state_2 = encoder(inputs_dict["input_features"])[0] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = WhisperDecoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class WhisperModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (WhisperModel, WhisperForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (WhisperForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( {"automatic-speech-recognition": WhisperForConditionalGeneration, "feature-extraction": WhisperModel} if is_torch_available() else {} ) is_encoder_decoder = True fx_compatible = False test_pruning = False test_missing_keys = False input_name = "input_features" def setUp(self): self.model_tester = WhisperModelTester(self) self.config_tester = ConfigTester(self, config_class=WhisperConfig) self.maxDiff = 3000 def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_model_forward(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs) def test_model_forward_with_frozen_encoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model_forward(*config_and_inputs, freeze_encoder=True) def test_requires_grad_with_frozen_encoder(self): config = 
self.model_tester.get_config() for model_class in self.all_model_classes: model = model_class(config) model.freeze_encoder() try: encoder_grads = [param.requires_grad for param in model.encoder.parameters()] decoder_grads = [param.requires_grad for param in model.decoder.parameters()] except AttributeError: encoder_grads = [param.requires_grad for param in model.model.encoder.parameters()] decoder_grads = [param.requires_grad for param in model.model.decoder.parameters()] self.assertFalse(all(encoder_grads)) self.assertTrue(all(decoder_grads)) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) def _get_input_ids_and_config(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() input_ids = inputs_dict[self.input_name] # cut to half length & take max batch_size 3 max_batch_size = 3 input_ids = input_ids[:max_batch_size, :, :] # generate max 3 tokens max_length = input_ids.shape[-1] + 3 if config.eos_token_id is not None and config.pad_token_id is None: # hack to allow generate for models such as GPT2 as is done in `generate()` config.pad_token_id = config.eos_token_id return config, input_ids, None, max_length # not implemented currently def test_inputs_embeds(self): pass # training is not supported yet def test_training(self): pass def test_training_gradient_checkpointing(self): pass def test_generate_with_head_masking(self): pass def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() config.max_target_positions = 400 input_features = input_dict["input_features"] model = WhisperForConditionalGeneration(config).eval().to(torch_device) if torch_device == "cuda": input_features = input_features.half() model.half() model.generate(input_features) model.generate(input_features, num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = [ "input_features", "attention_mask", "decoder_input_ids", "decoder_attention_mask", ] expected_arg_names.extend( ["head_mask", "decoder_head_mask", "cross_attn_head_mask", "encoder_outputs"] if "head_mask" and "decoder_head_mask" and "cross_attn_head_mask" in arg_names else ["encoder_outputs"] ) self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names) def test_hidden_states_output(self): def check_hidden_states_output(inputs_dict, config, model_class): model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states expected_num_layers = getattr( self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(hidden_states), expected_num_layers) if hasattr(self.model_tester, "encoder_seq_length"): seq_length = 
self.model_tester.encoder_seq_length else: seq_length = self.model_tester.seq_length subsampled_seq_length = model._get_feat_extract_output_lengths(seq_length) self.assertListEqual( list(hidden_states[0].shape[-2:]), [subsampled_seq_length, self.model_tester.hidden_size], ) if config.is_encoder_decoder: hidden_states = outputs.decoder_hidden_states self.assertIsInstance(hidden_states, (list, tuple)) self.assertEqual(len(hidden_states), expected_num_layers) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", 1) self.assertListEqual( list(hidden_states[0].shape[-2:]), [decoder_seq_length, self.model_tester.hidden_size], ) config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(inputs_dict, config, model_class) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(inputs_dict, config, model_class) def test_attention_outputs(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.return_dict = True seq_len = getattr(self.model_tester, "seq_length", None) decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", 1) encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", seq_len) decoder_key_length = getattr(self.model_tester, "decoder_key_length", 1) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False config.return_dict = True model = model_class(config) model.to(torch_device) model.eval() subsampled_encoder_seq_length = model._get_feat_extract_output_lengths(encoder_seq_length) subsampled_encoder_key_length = model._get_feat_extract_output_lengths(encoder_key_length) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) # check that output_attentions also work using config del inputs_dict["output_attentions"] config.output_attentions = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) out_len = len(outputs) correct_outlen = 5 # loss is at first position if "labels" in inputs_dict: correct_outlen += 1 # loss is added to beginning if "past_key_values" in outputs: correct_outlen += 1 # past_key_values have been returned self.assertEqual(out_len, correct_outlen) # decoder attentions decoder_attentions = outputs.decoder_attentions self.assertIsInstance(decoder_attentions, (list, tuple)) self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length], ) # cross attentions cross_attentions = outputs.cross_attentions self.assertIsInstance(cross_attentions, 
(list, tuple)) self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(cross_attentions[0].shape[-3:]), [ self.model_tester.num_attention_heads, decoder_seq_length, subsampled_encoder_key_length, ], ) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) model.to(torch_device) model.eval() with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) added_hidden_states = 2 self.assertEqual(out_len + added_hidden_states, len(outputs)) self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers) self.assertListEqual( list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, subsampled_encoder_seq_length, subsampled_encoder_key_length], ) def test_resize_tokens_embeddings(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.vocab_size # Retrieve the embeddings and clone theme model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = model_embed.weight.clone() # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] - 15) # make sure that decoder_input_ids are resized if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that adding and removing tokens has not modified the first part of the embedding matrix. 
models_equal = True for p1, p2 in zip(cloned_embeddings, model_embed.weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) def test_resize_embeddings_untied(self): ( original_config, inputs_dict, ) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return original_config.tie_word_embeddings = False # if model cannot untied embeddings -> leave test if original_config.tie_word_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config).to(torch_device) # if no output embeddings -> leave test if model.get_output_embeddings() is None: continue # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_vocab_size = config.vocab_size model.resize_token_embeddings(model_vocab_size + 10) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size + 10) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size + 10) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model.resize_token_embeddings(model_vocab_size - 15) self.assertEqual(model.config.vocab_size, model_vocab_size - 15) # Check that it actually resizes the embeddings matrix output_embeds = model.get_output_embeddings() self.assertEqual(output_embeds.weight.shape[0], model_vocab_size - 15) # Check bias if present if output_embeds.bias is not None: self.assertEqual(output_embeds.bias.shape[0], model_vocab_size - 15) # Check that the model can still do a forward pass successfully (every parameter should be resized) if "decoder_input_ids" in inputs_dict: inputs_dict["decoder_input_ids"].clamp_(max=model_vocab_size - 15 - 1) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**self._prepare_for_class(inputs_dict, model_class)) def test_generate_without_input_ids(self): pass @staticmethod def _get_encoder_outputs( model, input_ids, attention_mask, output_attentions=None, output_hidden_states=None, num_interleave=1 ): encoder = model.get_encoder() encoder_outputs = encoder( input_ids, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.repeat_interleave( num_interleave, dim=0 ) input_ids = input_ids[:, :, 0] input_ids = torch.zeros_like(input_ids[:, :1], dtype=torch.long) + torch.tensor( [model._get_decoder_start_token_id()], device=input_ids.device ) attention_mask = None return encoder_outputs, input_ids, attention_mask def _check_outputs(self, output, input_ids, config, use_cache=False, num_return_sequences=1): batch_size, mel, seq_length = input_ids.shape subsampled_seq_length = self.model_tester.get_subsampled_output_lengths(seq_length) num_sequences_in_output = batch_size * num_return_sequences gen_len = ( output.sequences.shape[-1] - 1 if config.is_encoder_decoder else output.sequences.shape[-1] - seq_length ) # scores self._check_scores(num_sequences_in_output, output.scores, length=gen_len, config=config) # Attentions # encoder self._check_encoder_attention_for_generate( output.encoder_attentions, 
batch_size, config, subsampled_seq_length ) # decoder self._check_attentions_for_generate( num_sequences_in_output, output.decoder_attentions, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) # Hidden States # encoder self._check_encoder_hidden_states_for_generate( output.encoder_hidden_states, batch_size, config, subsampled_seq_length ) # decoder self._check_hidden_states_for_generate( num_sequences_in_output, output.decoder_hidden_states, min_length=1, max_length=output.sequences.shape[-1], config=config, use_cache=use_cache, ) def _create_and_check_torchscript(self, config, inputs_dict): if not self.test_torchscript: return configs_no_init = _config_zero_init(config) # To be sure we have no Nan configs_no_init.torchscript = True for model_class in self.all_model_classes: model = model_class(config=configs_no_init) model.to(torch_device) model.eval() inputs = self._prepare_for_class(inputs_dict, model_class) try: model.config.use_cache = False # FSTM still requires this hack -> FSTM should probably be refactored similar to BART afterward input_features = inputs["input_features"] decoder_input_ids = inputs["decoder_input_ids"] decoder_attention_mask = inputs["decoder_attention_mask"] traced_model = torch.jit.trace(model, (input_features, decoder_input_ids, decoder_attention_mask)) except RuntimeError: self.fail("Couldn't trace module.") with tempfile.TemporaryDirectory() as tmp_dir_name: pt_file_name = os.path.join(tmp_dir_name, "traced_model.pt") try: torch.jit.save(traced_model, pt_file_name) except Exception: self.fail("Couldn't save module.") try: loaded_model = torch.jit.load(pt_file_name) except Exception: self.fail("Couldn't load module.") model.to(torch_device) model.eval() loaded_model.to(torch_device) loaded_model.eval() model_state_dict = model.state_dict() loaded_model_state_dict = loaded_model.state_dict() self.assertEqual(set(model_state_dict.keys()), set(loaded_model_state_dict.keys())) models_equal = True for layer_name, p1 in model_state_dict.items(): p2 = loaded_model_state_dict[layer_name] if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) @is_pt_flax_cross_test def test_equivalence_pt_to_flax(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() init_shape = (1,) + inputs_dict["input_features"].shape[1:] for model_class in self.all_model_classes: with self.subTest(model_class.__name__): fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): # no flax model exists for this class return # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions fx_model_class = getattr(transformers, fx_model_class_name) # load PyTorch class pt_model = model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. 
pt_model.config.use_cache = False # load Flax class fx_model = fx_model_class(config, input_shape=init_shape, dtype=jnp.float32) # make sure only flax inputs are forward that actually exist in function args fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() # prepare inputs pt_inputs = self._prepare_for_class(inputs_dict, model_class) # remove function args that don't exist in Flax pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} # send pytorch inputs to the correct device pt_inputs = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs.items() } # convert inputs to Flax fx_inputs = {k: np.array(v) for k, v in pt_inputs.items() if torch.is_tensor(v)} fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model) fx_model.params = fx_state # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(tmpdirname) fx_model_loaded = fx_model_class.from_pretrained(tmpdirname, input_shape=init_shape, from_pt=True) fx_outputs_loaded = fx_model_loaded(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs_loaded.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs_loaded, pt_outputs, model_class) @is_pt_flax_cross_test def test_equivalence_flax_to_pt(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() init_shape = (1,) + inputs_dict["input_features"].shape[1:] for model_class in self.all_model_classes: with self.subTest(model_class.__name__): fx_model_class_name = "Flax" + model_class.__name__ if not hasattr(transformers, fx_model_class_name): # no flax model exists for this class return # Output all for aggressive testing config.output_hidden_states = True config.output_attentions = self.has_attentions fx_model_class = getattr(transformers, fx_model_class_name) # load PyTorch class pt_model = model_class(config).eval() # Flax models don't use the `use_cache` option and cache is not returned as a default. # So we disable `use_cache` here for PyTorch model. 
pt_model.config.use_cache = False # load Flax class fx_model = fx_model_class(config, input_shape=init_shape, dtype=jnp.float32) # make sure only flax inputs are forward that actually exist in function args fx_input_keys = inspect.signature(fx_model.__call__).parameters.keys() # prepare inputs pt_inputs = self._prepare_for_class(inputs_dict, model_class) # remove function args that don't exist in Flax pt_inputs = {k: v for k, v in pt_inputs.items() if k in fx_input_keys} # send pytorch inputs to the correct device pt_inputs = { k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs.items() } # convert inputs to Flax fx_inputs = {k: np.array(v) for k, v in pt_inputs.items() if torch.is_tensor(v)} pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params) # make sure weights are tied in PyTorch pt_model.tie_weights() # send pytorch model to the correct device pt_model.to(torch_device) with torch.no_grad(): pt_outputs = pt_model(**pt_inputs) fx_outputs = fx_model(**fx_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs, model_class) with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(tmpdirname) pt_model_loaded = model_class.from_pretrained(tmpdirname, from_flax=True) # send pytorch model to the correct device pt_model_loaded.to(torch_device) pt_model_loaded.eval() with torch.no_grad(): pt_outputs_loaded = pt_model_loaded(**pt_inputs) fx_keys = tuple([k for k, v in fx_outputs.items() if v is not None]) pt_keys = tuple([k for k, v in pt_outputs_loaded.items() if v is not None]) self.assertEqual(fx_keys, pt_keys) self.check_pt_flax_outputs(fx_outputs, pt_outputs_loaded, model_class) def test_mask_feature_prob(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.mask_feature_prob = 0.2 config.mask_feature_length = 2 for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.train() # forward pass encoder_last_hidden_state = model(**input_dict).encoder_last_hidden_state self.assertTrue(encoder_last_hidden_state.shape, (13, 30, 16)) def test_mask_time_prob(self): config, input_dict = self.model_tester.prepare_config_and_inputs_for_common() config.mask_time_prob = 0.2 config.mask_time_length = 2 for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.train() # forward pass encoder_last_hidden_state = model(**input_dict).encoder_last_hidden_state self.assertTrue(encoder_last_hidden_state.shape, (13, 30, 16)) @require_torch @require_torchaudio class WhisperModelIntegrationTests(unittest.TestCase): @cached_property def default_processor(self): return WhisperProcessor.from_pretrained("openai/whisper-base") def _load_datasamples(self, num_samples): ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation") # automatic decoding with librispeech speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"] return [x["array"] for x in speech_samples] @slow def test_tiny_logits_librispeech(self): torch_device = "cpu" set_seed(0) model = WhisperModel.from_pretrained("openai/whisper-tiny") model.to(torch_device) input_speech = self._load_datasamples(1) feature_extractor = WhisperFeatureExtractor() input_features = feature_extractor(input_speech, return_tensors="pt").input_features with 
torch.no_grad(): logits = model( input_features, decoder_input_ids=torch.tensor([[50258, 50259, 50359]]), output_hidden_states=False, output_attentions=False, return_dict=False, use_cache=False, ) # fmt: off EXPECTED_LOGITS = torch.tensor( [ 2.9892, -6.7607, 5.7348, 3.6096, 0.2152, -5.7321, 4.8855, -1.6407, 0.2823, -1.5718, 10.4269, 3.4427, 0.0219, -8.0612, 3.4784, 8.4246, 4.0575, -2.2864, 11.1084, 0.9963, 0.9884, -8.5154, -3.5469, -9.3713, 0.9786, 3.5435, 7.4850, -5.2579, -1.4366, 10.4841 ] ) # fmt: on self.assertTrue(torch.allclose(logits[0][0, 0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4)) # fmt: off EXPECTED_GENERATION = torch.tensor( [ -1.4651, -2.6944, 2.7821, 2.3793, 4.0738, 0.0188, -3.3203, 1.9836, 0.0520, 0.7095, 1.1063, 0.2952, -3.6786, -0.5249, 0.3105, 4.7691, 1.1562, 1.3046, 0.5810, -0.3624, 1.7006, 1.3424, 0.9817, 2.1958, 1.8775, -5.7046, -0.7679, 4.0113, 2.6848, 2.8609 ] ) # fmt: on head_logits = logits[0] @ model.decoder.embed_tokens.weight.T self.assertTrue(torch.allclose(head_logits[0, 0, :30].cpu(), EXPECTED_GENERATION, atol=1e-4)) @slow def test_small_en_logits_librispeech(self): set_seed(0) torch_device = "cpu" model = WhisperModel.from_pretrained("openai/whisper-small.en") model.to(torch_device) input_speech = self._load_datasamples(1) feaure_extractor = WhisperFeatureExtractor() input_features = feaure_extractor(input_speech, return_tensors="pt").input_features.to(torch_device) logits = model( input_features, decoder_input_ids=torch.tensor([[model.config.decoder_start_token_id]]), output_hidden_states=False, output_attentions=False, use_cache=False, ) logits = logits.last_hidden_state @ model.decoder.embed_tokens.weight.T # fmt: off EXPECTED_LOGITS = torch.tensor( [ -3.6784, -7.7211, -9.5070, -11.9286, -7.6489, -9.7026, -5.6188, -8.0104, -4.6238, -5.1833, -9.0485, -3.4079, -5.4874, -2.6935, -6.3479, -7.3398, -6.9558, -7.6867, -7.4748, -8.3463, -9.9781, -10.8389, -10.3105, -11.7201, -9.7261, -7.1590, -5.9272, -12.4509, -11.1146, -8.1918 ] ) # fmt: on self.assertTrue(torch.allclose(logits[0, 0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4)) @slow def test_large_logits_librispeech(self): set_seed(0) torch_device = "cpu" model = WhisperModel.from_pretrained("openai/whisper-large") model.to(torch_device) input_speech = self._load_datasamples(1) processor = WhisperProcessor.from_pretrained("openai/whisper-large") processed_inputs = processor( audio=input_speech, text="This part of the speech", add_special_tokens=False, return_tensors="pt" ) input_features = processed_inputs.input_features.to(torch_device) decoder_input_ids = processed_inputs.labels.to(torch_device) logits = model( input_features, decoder_input_ids=decoder_input_ids, output_hidden_states=False, output_attentions=False, use_cache=False, ) logits = logits.last_hidden_state @ model.decoder.embed_tokens.weight.T # fmt: off EXPECTED_LOGITS = torch.tensor( [ 2.1382, 0.9381, 4.4671, 3.5589, 2.4022, 3.8576, -0.6521, 2.5472, 1.8301, 1.9957, 2.3432, 1.4678, 0.5459, 2.2597, 1.5179, 2.5357, 1.1624, 0.6194, 1.0757, 1.8259, 2.4076, 1.6601, 2.3503, 1.3376, 1.9891, 1.8635, 3.8931, 5.3699, 4.4772, 3.9184 ] ) # fmt: on self.assertTrue(torch.allclose(logits[0, 0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4)) @slow def test_tiny_en_generation(self): torch_device = "cpu" set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") model.to(torch_device) model.config.decoder_start_token_id = 50257 input_speech = self._load_datasamples(1) 
input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generated_ids = model.generate(input_features, num_beams=5, max_length=20) transcript = processor.tokenizer.batch_decode(generated_ids)[0] EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|notimestamps|> Mr. Quilter is the apostle of the middle" " classes, and we are glad to" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_generation(self): torch_device = "cpu" set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") model.to(torch_device) input_speech = self._load_datasamples(1) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generated_ids = model.generate(input_features, num_beams=5, max_length=20) transcript = processor.tokenizer.decode(generated_ids[0]) EXPECTED_TRANSCRIPT = ( "<|startoftranscript|><|en|><|transcribe|><|notimestamps|> Mr. Quilter is the apostle of the middle" " classes and we are glad" ) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_large_generation(self): torch_device = "cpu" set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large") model.to(torch_device) input_speech = self._load_datasamples(1) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|en|>", task="transcribe" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " Mr. Quilter is the apostle of the middle classes and we are glad" self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_large_generation_multilingual(self): torch_device = "cpu" set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large") model.to(torch_device) ds = load_dataset("common_voice", "ja", split="test", streaming=True) ds = ds.cast_column("audio", datasets.Audio(sampling_rate=16_000)) input_speech = next(iter(ds))["audio"]["array"] input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|ja|>", task="transcribe" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = "木村さんに電話を貸してもらいました" self.assertEqual(transcript, EXPECTED_TRANSCRIPT) generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|en|>", task="transcribe" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " Kimura-san called me." 
self.assertEqual(transcript, EXPECTED_TRANSCRIPT) generated_ids = model.generate( input_features, do_sample=False, max_length=20, language="<|ja|>", task="translate" ) transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0] EXPECTED_TRANSCRIPT = " I borrowed a phone from Kimura san" self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_large_batched_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-large") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large") input_speech = self._load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features generated_ids = model.generate(input_features, max_length=20, task="translate") # fmt: off EXPECTED_LOGITS = torch.tensor( [ [50258, 50259, 50358, 50363, 2221, 13, 2326, 388, 391, 307, 264, 50244, 295, 264, 2808, 5359, 293, 321, 366, 5404], [50258, 50259, 50358, 50363, 6966, 307, 2221, 13, 2326, 388, 391, 311, 9060, 1570, 1880, 813, 702, 1871, 13, 50257], [50258, 50259, 50358, 50363, 634, 5112, 505, 300, 412, 341, 42729, 3196, 295, 264, 1064, 11, 365, 5272, 293, 12904], [50258, 50259, 50358, 50363, 634, 575, 12525, 22618, 1968, 6144, 35617, 20084, 1756, 311, 589, 307, 534, 10281, 934, 439] ] ) # fmt: on self.assertTrue(torch.allclose(generated_ids, EXPECTED_LOGITS)) # fmt: off EXPECTED_TRANSCRIPT = [ " Mr. Quilter is the apostle of the middle classes and we are glad", " Nor is Mr. Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all", ] # fmt: on transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_en_batched_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en") model.to(torch_device) input_speech = self._load_datasamples(4) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generated_ids = model.generate(input_features, max_length=20).to("cpu") # fmt: off EXPECTED_LOGITS = torch.tensor( [ [50257, 50362, 1770, 13, 2264, 346, 353, 318, 262, 46329, 286, 262, 3504, 6097, 11, 290, 356, 389, 9675, 284], [50257, 50362, 5414, 318, 1770, 13, 2264, 346, 353, 338, 5642, 1342, 3499, 621, 465, 2300, 13, 50256, 50256, 50256], [50257, 50362, 679, 4952, 514, 326, 379, 428, 43856, 1622, 286, 262, 614, 11, 351, 6786, 290, 32595, 12023, 28236], [50257, 50362, 679, 468, 12296, 17188, 1771, 7361, 26113, 18881, 1122, 338, 670, 318, 1107, 8312, 706, 477, 290, 460] ] ) # fmt: on self.assertTrue(torch.allclose(generated_ids, EXPECTED_LOGITS)) # fmt: off EXPECTED_TRANSCRIPT = [ " Mr. Quilter is the apostle of the middle classes, and we are glad to", " Nor is Mr. 
Quilter's manner less interesting than his matter.", " He tells us that at this festive season of the year, with Christmas and roast beef looming", " He has grave doubts whether Sir Frederick Layton's work is really Greek after all and can", ] # fmt: on transcript = processor.batch_decode(generated_ids, skip_special_tokens=True) self.assertListEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_timestamp_generation(self): set_seed(0) processor = WhisperProcessor.from_pretrained("openai/whisper-tiny") model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny") model.to(torch_device) input_speech = np.concatenate(self._load_datasamples(4)) input_features = processor.feature_extractor(raw_speech=input_speech, return_tensors="pt").input_features.to( torch_device ) generated_ids = model.generate(input_features, max_length=448, return_timestamps=True).to("cpu") # fmt: off EXPECTED_OUTPUT = torch.tensor([50258, 50259, 50359, 50364, 2221, 13, 2326, 388, 391, 307, 264, 50244, 295, 264, 2808, 5359, 11, 293, 321, 366, 5404, 281, 2928, 702, 14943, 13, 50692, 50692, 6966, 307, 2221, 13, 2326, 388, 391, 311, 9060, 1570, 1880, 813, 702, 1871, 13, 50926, 50926, 634, 5112, 505, 300, 412, 341, 42729, 3196, 295, 264, 1064, 11, 365, 5272, 293, 12904, 9256, 450, 10539, 51208, 51208, 949, 505, 11, 14138, 10117, 490, 3936, 293, 1080, 3542, 5160, 881, 26336, 281, 264, 1575, 13, 51552, 51552, 634, 575, 12525, 22618, 1968, 6144, 35617, 7354, 1292, 6, 589, 307, 534, 10281, 934, 439, 11, 293, 51836, 51836, 50257]) # fmt: on self.assertTrue(torch.allclose(generated_ids, EXPECTED_OUTPUT)) EXPECTED_TRANSCRIPT = [ { "text": ( " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel. Nor is" " Mr. Quilter's manner less interesting than his matter. He tells us that at this festive season" " of the year, with Christmas and roast beef looming before us, similarly drawn from eating and" " its results occur most readily to the mind. He has grave doubts whether Sir Frederick Latins'" " work is really Greek after all, and" ), "offsets": [ { "text": ( " Mr. Quilter is the apostle of the middle classes, and we are glad to welcome his gospel." ), "timestamp": (0.0, 6.5600000000000005), }, { "text": " Nor is Mr. Quilter's manner less interesting than his matter.", "timestamp": (6.5600000000000005, 11.24), }, { "text": ( " He tells us that at this festive season of the year, with Christmas and roast beef" " looming" ), "timestamp": (11.24, 16.88), }, { "text": ( " before us, similarly drawn from eating and its results occur most readily to the mind." 
), "timestamp": (16.88, 23.76), }, { "text": ( " He has grave doubts whether Sir Frederick Latins' work is really Greek after all, and" ), "timestamp": (23.76, 29.44), }, ], } ] transcript = processor.batch_decode(generated_ids, skip_special_tokens=True, output_offsets=True) self.assertEqual(transcript, EXPECTED_TRANSCRIPT) @slow def test_tiny_specaugment_librispeech(self): torch_device = "cpu" set_seed(0) # Apply SpecAugment model = WhisperModel.from_pretrained("openai/whisper-tiny", apply_spec_augment=True) # Set model to training mode to enable SpecAugment model.train() model.to(torch_device) input_speech = self._load_datasamples(1) feature_extractor = WhisperFeatureExtractor() input_features = feature_extractor(input_speech, return_tensors="pt").input_features with torch.no_grad(): logits = model( input_features, decoder_input_ids=torch.tensor([[50258, 50259, 50359]]), output_hidden_states=False, output_attentions=False, return_dict=False, use_cache=False, ) # fmt: off EXPECTED_LOGITS = torch.tensor( [ 0.9362, -4.7105, 5.0879, 3.9642, 1.0013, -6.0096, 4.7285, -3.1847, -0.8648, 1.9631, 6.2653, 3.6936, 0.3575, -4.5818, 3.0564, 7.8712, 2.9951, 0.6848, 9.9497, -2.6638, 1.1571, -6.8546, -1.4333, -7.7584, 1.1200, 3.9030, 4.4655, -4.4919, -1.1703, 9.6241 ] ) # fmt: on self.assertTrue(torch.allclose(logits[0][0, 0, :30].cpu(), EXPECTED_LOGITS, atol=1e-4))
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/esm/test_modeling_esmfold.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch ESM model. """ import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class EsmFoldModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=False, vocab_size=19, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): config = EsmConfig( vocab_size=33, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, 
type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, is_folding_model=True, esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False}, ) return config def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels): model = EsmForProteinFolding(config=config).float() model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) result = model(input_ids) self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3)) self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_mismatched_shapes = False all_model_classes = (EsmForProteinFolding,) if is_torch_available() else () all_generative_model_classes = () pipeline_model_mapping = {} if is_torch_available() else {} test_sequence_classification_problem_types = False def setUp(self): self.model_tester = EsmFoldModelTester(self) self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip("Does not support attention outputs") def test_attention_outputs(self): pass @unittest.skip def test_correct_missing_keys(self): pass @unittest.skip("Esm does not support embedding resizing") def test_resize_embeddings_untied(self): pass @unittest.skip("Esm does not support embedding resizing") def test_resize_tokens_embeddings(self): pass @unittest.skip("ESMFold does not support passing input embeds!") def test_inputs_embeds(self): pass @unittest.skip("ESMFold does not support head pruning.") def test_head_pruning(self): pass @unittest.skip("ESMFold does not support head pruning.") def test_head_pruning_integration(self): pass @unittest.skip("ESMFold does not support head pruning.") def test_head_pruning_save_load_from_config_init(self): pass @unittest.skip("ESMFold does not support head pruning.") def test_head_pruning_save_load_from_pretrained(self): pass @unittest.skip("ESMFold does not support head pruning.") def test_headmasking(self): pass @unittest.skip("ESMFold does not output hidden states in the normal way.") def test_hidden_states_output(self): pass @unittest.skip("ESMfold does not output hidden states in the normal way.") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip("ESMFold only has one output format.") def test_model_outputs_equivalence(self): pass @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality") def test_save_load_fast_init_from_base(self): pass @unittest.skip("ESMFold does not support input chunking.") def test_feed_forward_chunking(self): pass @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.") def test_initialization(self): pass @unittest.skip("ESMFold doesn't support torchscript compilation.") def test_torchscript_output_attentions(self): pass @unittest.skip("ESMFold doesn't support torchscript 
compilation.") def test_torchscript_output_hidden_state(self): pass @unittest.skip("ESMFold doesn't support torchscript compilation.") def test_torchscript_simple(self): pass @unittest.skip("ESMFold doesn't support data parallel.") def test_multi_gpu_data_parallel_forward(self): pass @require_torch class EsmModelIntegrationTest(TestCasePlus): @slow def test_inference_protein_folding(self): model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float() model.eval() input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]]) position_outputs = model(input_ids)["positions"] expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch ESM model. """ import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class EsmFoldModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=False, use_input_mask=True, use_token_type_ids=False, use_labels=False, vocab_size=19, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def get_config(self): config = EsmConfig( vocab_size=33, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, 
type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, is_folding_model=True, esmfold_config={"trunk": {"num_blocks": 2}, "fp16_esm": False}, ) return config def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels): model = EsmForProteinFolding(config=config).float() model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask) result = model(input_ids) result = model(input_ids) self.parent.assertEqual(result.positions.shape, (8, self.batch_size, self.seq_length, 14, 3)) self.parent.assertEqual(result.angles.shape, (8, self.batch_size, self.seq_length, 7, 2)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class EsmFoldModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): test_mismatched_shapes = False all_model_classes = (EsmForProteinFolding,) if is_torch_available() else () all_generative_model_classes = () pipeline_model_mapping = {} if is_torch_available() else {} test_sequence_classification_problem_types = False def setUp(self): self.model_tester = EsmFoldModelTester(self) self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) @unittest.skip("Does not support attention outputs") def test_attention_outputs(self): pass @unittest.skip def test_correct_missing_keys(self): pass @unittest.skip("Esm does not support embedding resizing") def test_resize_embeddings_untied(self): pass @unittest.skip("Esm does not support embedding resizing") def test_resize_tokens_embeddings(self): pass @unittest.skip("ESMFold does not support passing input embeds!") def test_inputs_embeds(self): pass @unittest.skip("ESMFold does not support head pruning.") def test_head_pruning(self): pass @unittest.skip("ESMFold does not support head pruning.") def test_head_pruning_integration(self): pass @unittest.skip("ESMFold does not support head pruning.") def test_head_pruning_save_load_from_config_init(self): pass @unittest.skip("ESMFold does not support head pruning.") def test_head_pruning_save_load_from_pretrained(self): pass @unittest.skip("ESMFold does not support head pruning.") def test_headmasking(self): pass @unittest.skip("ESMFold does not output hidden states in the normal way.") def test_hidden_states_output(self): pass @unittest.skip("ESMfold does not output hidden states in the normal way.") def test_retain_grad_hidden_states_attentions(self): pass @unittest.skip("ESMFold only has one output format.") def test_model_outputs_equivalence(self): pass @unittest.skip("This test doesn't work for ESMFold and doesn't test core functionality") def test_save_load_fast_init_from_base(self): pass @unittest.skip("ESMFold does not support input chunking.") def test_feed_forward_chunking(self): pass @unittest.skip("ESMFold doesn't respect you and it certainly doesn't respect your initialization arguments.") def test_initialization(self): pass @unittest.skip("ESMFold doesn't support torchscript compilation.") def test_torchscript_output_attentions(self): pass @unittest.skip("ESMFold doesn't support torchscript 
compilation.") def test_torchscript_output_hidden_state(self): pass @unittest.skip("ESMFold doesn't support torchscript compilation.") def test_torchscript_simple(self): pass @unittest.skip("ESMFold doesn't support data parallel.") def test_multi_gpu_data_parallel_forward(self): pass @require_torch class EsmModelIntegrationTest(TestCasePlus): @slow def test_inference_protein_folding(self): model = EsmForProteinFolding.from_pretrained("facebook/esmfold_v1").float() model.eval() input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]]) position_outputs = model(input_ids)["positions"] expected_slice = torch.tensor([2.5828, 0.7993, -10.9334], dtype=torch.float32) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0], expected_slice, atol=1e-4))
-1
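As a quick illustration of the migration described in the PR description above (this sketch is not taken from the PR diff itself; the tensor name and values are made up for the example), the deprecated tensor floor division `//` can be replaced with `torch.div(..., rounding_mode="floor")`:

```python
import torch

# Hypothetical example tensor; any integer tensor that was previously
# floor-divided with `//` would be handled the same way.
token_positions = torch.arange(12)

# Old pattern that emits the __floordiv__ deprecation warning:
#     block_ids = token_positions // 4

# Replacement: explicit floor division via torch.div
block_ids = torch.div(token_positions, 4, rounding_mode="floor")

print(block_ids)  # tensor([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
```

`torch.div` with `rounding_mode="floor"` reproduces the old integer-division result while avoiding the deprecated operator, which appears to be the change the PR applies across the files listed in these records.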
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/rembert/__init__.py
-1
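A minimal sketch of the replacement named in the PR description above, assuming `rounding_mode="floor"` as the intended semantics (the description only names `torch.div`, so that keyword argument and the tensor values below are illustrative, not taken from the PR's diff):

```python
import torch

block_len = torch.tensor([17, 42, 96])  # illustrative values
chunk_size = 8

# Deprecated pattern: `//` on a tensor emits the __floordiv__ warning.
# num_chunks = block_len // chunk_size

# Explicit replacement via torch.div; rounding_mode="floor" keeps
# floor-division semantics (identical results for non-negative values).
num_chunks = torch.div(block_len, chunk_size, rounding_mode="floor")
print(num_chunks)  # floor-divided values: [2, 5, 12]
```

For truncation toward zero instead of flooring, `torch.div` also accepts `rounding_mode="trunc"`; which mode matches the old `//` behavior depends on the sign of the operands.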
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/longformer/test_tokenization_longformer.py
# coding=utf-8 # Copyright 2022 Tsimur Hadeliya. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the Longformer tokenizer. """ import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin # Copied from transformers.tests.roberta.test_modeling_roberta.py with Roberta->Longformer @require_tokenizers class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = LongformerTokenizer test_slow_tokenizer = True rust_tokenizer_class = LongformerTokenizerFast test_rust_tokenizer = True def setUp(self): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] self.special_tokens_map = {"unk_token": "<unk>"} self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) def get_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_rust_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self, tokenizer): input_text = "lower newer" output_text = "lower newer" return input_text, output_text def test_full_tokenizer(self): tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map) text = "lower newer" bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] tokens = tokenizer.tokenize(text) # , add_prefix_space=True) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + [tokenizer.unk_token] input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) def longformer_dict_integration_testing(self): tokenizer = self.get_tokenizer() self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2]) self.assertListEqual( tokenizer.encode("Hello world! 
cécé herlolip 418", add_special_tokens=False), [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2], ) @slow def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096") text = tokenizer.encode("sequence builders", add_special_tokens=False) text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_text_from_decode = tokenizer.encode( "sequence builders", add_special_tokens=True, add_prefix_space=False ) encoded_pair_from_decode = tokenizer.encode( "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False ) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def test_space_encoding(self): tokenizer = self.get_tokenizer() sequence = "Encode this sequence." space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]] # Testing encoder arguments encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False) first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0] self.assertNotEqual(first_char, space_encoding) encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True) first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0] self.assertEqual(first_char, space_encoding) tokenizer.add_special_tokens({"bos_token": "<s>"}) encoded = tokenizer.encode(sequence, add_special_tokens=True) first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0] self.assertNotEqual(first_char, space_encoding) # Testing spaces after special tokens mask = "<mask>" tokenizer.add_special_tokens( {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)} ) # mask token has a left space mask_ind = tokenizer.convert_tokens_to_ids(mask) sequence = "Encode <mask> sequence" sequence_nospace = "Encode <mask>sequence" encoded = tokenizer.encode(sequence) mask_loc = encoded.index(mask_ind) first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0] self.assertEqual(first_char, space_encoding) encoded = tokenizer.encode(sequence_nospace) mask_loc = encoded.index(mask_ind) first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0] self.assertNotEqual(first_char, space_encoding) def test_pretokenized_inputs(self): pass def test_embeded_special_tokens(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) sentence = "A, <mask> AllenNLP sentence." 
tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True) tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"])) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]), sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]), ) tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"]) tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"]) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual( tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) def test_change_add_prefix_space_and_trim_offsets_args(self): for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2): tokenizer_r = self.rust_tokenizer_class.from_pretrained( self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets ) pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__()) post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__()) self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space) self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space) self.assertEqual(post_processor_state["trim_offsets"], trim_offsets) def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): text_of_1_token = "hello" # `hello` is a token in the vocabulary of `pretrained_name` text = f"{text_of_1_token} {text_of_1_token}" tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True ) encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token))) self.assertEqual( encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), ) tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True ) encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token))) self.assertEqual( encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), ) tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False ) encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token))) self.assertEqual( encoding.offset_mapping[1], 
(len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), ) tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False ) encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token))) self.assertEqual( encoding.offset_mapping[1], (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), ) text = f" {text}" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True ) encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) self.assertEqual( encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), ) tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False ) encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token))) self.assertEqual( encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), ) tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False ) encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token))) self.assertEqual( encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
# coding=utf-8 # Copyright 2022 Tsimur Hadeliya. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the Longformer tokenizer. """ import itertools import json import os import unittest from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin # Copied from transformers.tests.roberta.test_modeling_roberta.py with Roberta->Longformer @require_tokenizers class LongformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = LongformerTokenizer test_slow_tokenizer = True rust_tokenizer_class = LongformerTokenizerFast test_rust_tokenizer = True def setUp(self): super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] self.special_tokens_map = {"unk_token": "<unk>"} self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) def get_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_rust_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self, tokenizer): input_text = "lower newer" output_text = "lower newer" return input_text, output_text def test_full_tokenizer(self): tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map) text = "lower newer" bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"] tokens = tokenizer.tokenize(text) # , add_prefix_space=True) self.assertListEqual(tokens, bpe_tokens) input_tokens = tokens + [tokenizer.unk_token] input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19] self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens) def longformer_dict_integration_testing(self): tokenizer = self.get_tokenizer() self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2]) self.assertListEqual( tokenizer.encode("Hello world! 
cécé herlolip 418", add_special_tokens=False), [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2], ) @slow def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096") text = tokenizer.encode("sequence builders", add_special_tokens=False) text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_text_from_decode = tokenizer.encode( "sequence builders", add_special_tokens=True, add_prefix_space=False ) encoded_pair_from_decode = tokenizer.encode( "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False ) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == encoded_text_from_decode assert encoded_pair == encoded_pair_from_decode def test_space_encoding(self): tokenizer = self.get_tokenizer() sequence = "Encode this sequence." space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]] # Testing encoder arguments encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False) first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0] self.assertNotEqual(first_char, space_encoding) encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True) first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0] self.assertEqual(first_char, space_encoding) tokenizer.add_special_tokens({"bos_token": "<s>"}) encoded = tokenizer.encode(sequence, add_special_tokens=True) first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0] self.assertNotEqual(first_char, space_encoding) # Testing spaces after special tokens mask = "<mask>" tokenizer.add_special_tokens( {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)} ) # mask token has a left space mask_ind = tokenizer.convert_tokens_to_ids(mask) sequence = "Encode <mask> sequence" sequence_nospace = "Encode <mask>sequence" encoded = tokenizer.encode(sequence) mask_loc = encoded.index(mask_ind) first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0] self.assertEqual(first_char, space_encoding) encoded = tokenizer.encode(sequence_nospace) mask_loc = encoded.index(mask_ind) first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0] self.assertNotEqual(first_char, space_encoding) def test_pretokenized_inputs(self): pass def test_embeded_special_tokens(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) sentence = "A, <mask> AllenNLP sentence." 
tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True) tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True) # token_type_ids should put 0 everywhere self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"])) # attention_mask should put 1 everywhere, so sum over length should be 1 self.assertEqual( sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]), sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]), ) tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"]) tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"]) # Rust correctly handles the space before the mask while python doesnt self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual( tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) def test_change_add_prefix_space_and_trim_offsets_args(self): for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2): tokenizer_r = self.rust_tokenizer_class.from_pretrained( self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets ) pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__()) post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__()) self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space) self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space) self.assertEqual(post_processor_state["trim_offsets"], trim_offsets) def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self): # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and # `trim_offsets` for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): text_of_1_token = "hello" # `hello` is a token in the vocabulary of `pretrained_name` text = f"{text_of_1_token} {text_of_1_token}" tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True ) encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token))) self.assertEqual( encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), ) tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True ) encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token))) self.assertEqual( encoding.offset_mapping[1], (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)), ) tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False ) encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token))) self.assertEqual( encoding.offset_mapping[1], 
(len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), ) tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False ) encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token))) self.assertEqual( encoding.offset_mapping[1], (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)), ) text = f" {text}" # tokenizer_r = self.rust_tokenizer_class.from_pretrained( # pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True # ) # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) # self.assertEqual( # encoding.offset_mapping[1], # (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), # ) tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True ) encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token))) self.assertEqual( encoding.offset_mapping[1], (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), ) tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False ) encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token))) self.assertEqual( encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), ) tokenizer_r = self.rust_tokenizer_class.from_pretrained( pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False ) encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False) self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token))) self.assertEqual( encoding.offset_mapping[1], (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)), )
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/mpnet/__init__.py
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/mbart50/__init__.py
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart50"] = ["MBart50Tokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart50_fast"] = ["MBart50TokenizerFast"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart50 import MBart50Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart50_fast import MBart50TokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available


_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart50"] = ["MBart50Tokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart50_fast"] = ["MBart50TokenizerFast"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart50 import MBart50Tokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart50_fast import MBart50TokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/perceiver/test_tokenization_perceiver.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): FRAMEWORK = "pt" elif is_tf_available(): FRAMEWORK = "tf" else: FRAMEWORK = "jax" class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = PerceiverTokenizer test_rust_tokenizer = False def setUp(self): super().setUp() tokenizer = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname) @cached_property def perceiver_tokenizer(self): return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver") def get_tokenizer(self, **kwargs) -> PerceiverTokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]: # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for Perceiver because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. toks = [] for i in range(len(tokenizer)): try: tok = tokenizer.decode([i], clean_up_tokenization_spaces=False) except UnicodeDecodeError: pass toks.append((i, tok)) toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks)) toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks)) if max_length is not None and len(toks) > max_length: toks = toks[:max_length] if min_length is not None and len(toks) < min_length and len(toks) > 0: while len(toks) < min_length: toks = toks + toks # toks_str = [t[1] for t in toks] toks_ids = [t[0] for t in toks] # Ensure consistency output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False) if " " not in output_txt and len(toks_ids) > 1: output_txt = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False) + " " + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False) ) if with_prefix_space: output_txt = " " + output_txt output_ids = tokenizer.encode(output_txt, add_special_tokens=False) return output_txt, output_ids def test_multibytes_char(self): tokenizer = self.perceiver_tokenizer src_text = "Unicode €." 
encoded = tokenizer(src_text) encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded["input_ids"], encoded_ids) # decoding decoded = tokenizer.decode(encoded_ids) self.assertEqual(decoded, "[CLS]Unicode €.[SEP]") encoded = tokenizer("e è é ê ë") encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded["input_ids"], encoded_ids) # decoding decoded = tokenizer.decode(encoded_ids) self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]") # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]") def test_prepare_batch_integration(self): tokenizer = self.perceiver_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] # fmt: off expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: on batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) self.assertIsInstance(batch, BatchEncoding) if FRAMEWORK != "jax": result = list(batch.input_ids.numpy()[0]) else: result = list(batch.input_ids.tolist()[0]) self.assertListEqual(expected_src_tokens, result) self.assertEqual((2, 38), batch.input_ids.shape) self.assertEqual((2, 38), batch.attention_mask.shape) def test_empty_target_text(self): tokenizer = self.perceiver_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) # check if input_ids are returned and no decoder_input_ids self.assertIn("input_ids", batch) self.assertIn("attention_mask", batch) self.assertNotIn("decoder_input_ids", batch) self.assertNotIn("decoder_attention_mask", batch) def test_max_length_integration(self): tokenizer = self.perceiver_tokenizer tgt_text = [ "Summary of the text.", "Another summary.", ] targets = tokenizer( text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK ) self.assertEqual(32, targets["input_ids"].shape[1]) # cannot use default save_and_load_tokenzier test method because tokenzier has no vocab def test_save_and_load_tokenizer(self): # safety check on max_len default value so we are sure the test works tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): self.assertNotEqual(tokenizer.model_max_length, 42) # Now let's start the test tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Isolate this from the other tests because we save additional tokens/etc tmpdirname = tempfile.mkdtemp() sample_text = " He is very happy, UNwant\u00E9d,running" before_tokens = tokenizer.encode(sample_text, add_special_tokens=False) tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False) self.assertListEqual(before_tokens, after_tokens) shutil.rmtree(tmpdirname) tokenizers = self.get_tokenizers(model_max_length=42) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Isolate this from the other tests because we save additional tokens/etc tmpdirname = tempfile.mkdtemp() sample_text = " He is very happy, UNwant\u00E9d,running" tokenizer.add_tokens(["bim", 
"bambam"]) additional_special_tokens = tokenizer.additional_special_tokens additional_special_tokens.append("new_additional_special_token") tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens}) before_tokens = tokenizer.encode(sample_text, add_special_tokens=False) tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False) self.assertListEqual(before_tokens, after_tokens) self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens) self.assertEqual(after_tokenizer.model_max_length, 42) tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43) self.assertEqual(tokenizer.model_max_length, 43) shutil.rmtree(tmpdirname) # There is a conflict between the default value of extra_ids and adding a new special token through additional_special_tokens # We need to add the extra_ids in the list of the arg additional_special_tokens def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self): tokenizer_list = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer())) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer())) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(tmp_dir) with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file: special_tokens_map = json.load(json_file) with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file: tokenizer_config = json.load(json_file) added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)] special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [ "an_additional_special_token" ] tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [ "an_additional_special_token" ] with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile: json.dump(special_tokens_map, outfile) with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile: json.dump(tokenizer_config, outfile) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files tokenizer_without_change_in_init = tokenizer_class.from_pretrained( tmp_dir, ) self.assertIn( "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"]) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)] tokenizer = tokenizer_class.from_pretrained( tmp_dir, additional_special_tokens=new_added_tokens, ) self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens) self.assertEqual( ["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"]) ), ) def test_decode_invalid_byte_id(self): tokenizer = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178]), "�") # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list def test_pretrained_model_lists(self): pass # tokenizer does not have vocabulary def test_get_vocab(self): pass # inputs cannot be pretokenized since ids depend on whole input string and not just on single characters def test_pretokenized_inputs(self): pass # tests all ids in vocab => vocab doesn't exist so unnecessary to test def test_conversion_reversible(self): pass def test_convert_tokens_to_string_format(self): # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character # strings and special added tokens as tokens tokenizers = self.get_tokenizers(fast=True, do_lower_case=True) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"] string = tokenizer.convert_tokens_to_string(tokens) self.assertIsInstance(string, str)
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import re import shutil import tempfile import unittest from typing import Tuple from transformers import AddedToken, BatchEncoding, PerceiverTokenizer from transformers.utils import cached_property, is_tf_available, is_torch_available from ...test_tokenization_common import TokenizerTesterMixin if is_torch_available(): FRAMEWORK = "pt" elif is_tf_available(): FRAMEWORK = "tf" else: FRAMEWORK = "jax" class PerceiverTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = PerceiverTokenizer test_rust_tokenizer = False def setUp(self): super().setUp() tokenizer = PerceiverTokenizer() tokenizer.save_pretrained(self.tmpdirname) @cached_property def perceiver_tokenizer(self): return PerceiverTokenizer.from_pretrained("deepmind/language-perceiver") def get_tokenizer(self, **kwargs) -> PerceiverTokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]: # XXX The default common tokenizer tests assume that every ID is decodable on its own. # This assumption is invalid for Perceiver because single bytes might not be # valid utf-8 (byte 128 for instance). # Here we're overriding the smallest possible method to provide # a clean sequence without making the same assumption. toks = [] for i in range(len(tokenizer)): try: tok = tokenizer.decode([i], clean_up_tokenization_spaces=False) except UnicodeDecodeError: pass toks.append((i, tok)) toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks)) toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks)) if max_length is not None and len(toks) > max_length: toks = toks[:max_length] if min_length is not None and len(toks) < min_length and len(toks) > 0: while len(toks) < min_length: toks = toks + toks # toks_str = [t[1] for t in toks] toks_ids = [t[0] for t in toks] # Ensure consistency output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False) if " " not in output_txt and len(toks_ids) > 1: output_txt = ( tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False) + " " + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False) ) if with_prefix_space: output_txt = " " + output_txt output_ids = tokenizer.encode(output_txt, add_special_tokens=False) return output_txt, output_ids def test_multibytes_char(self): tokenizer = self.perceiver_tokenizer src_text = "Unicode €." 
encoded = tokenizer(src_text) encoded_ids = [4, 91, 116, 111, 105, 117, 106, 107, 38, 232, 136, 178, 52, 5] self.assertEqual(encoded["input_ids"], encoded_ids) # decoding decoded = tokenizer.decode(encoded_ids) self.assertEqual(decoded, "[CLS]Unicode €.[SEP]") encoded = tokenizer("e è é ê ë") encoded_ids = [4, 107, 38, 201, 174, 38, 201, 175, 38, 201, 176, 38, 201, 177, 5] self.assertEqual(encoded["input_ids"], encoded_ids) # decoding decoded = tokenizer.decode(encoded_ids) self.assertEqual(decoded, "[CLS]e è é ê ë[SEP]") # encode/decode, but with `encode` instead of `__call__` self.assertEqual(tokenizer.decode(tokenizer.encode("e è é ê ë")), "[CLS]e è é ê ë[SEP]") def test_prepare_batch_integration(self): tokenizer = self.perceiver_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] # fmt: off expected_src_tokens = [4, 71, 38, 114, 117, 116, 109, 38, 118, 103, 120, 103, 109, 120, 103, 118, 110, 38, 108, 117, 120, 38, 121, 123, 115, 115, 103, 120, 111, 128, 103, 122, 111, 117, 116, 52, 5, 0] # fmt: on batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) self.assertIsInstance(batch, BatchEncoding) if FRAMEWORK != "jax": result = list(batch.input_ids.numpy()[0]) else: result = list(batch.input_ids.tolist()[0]) self.assertListEqual(expected_src_tokens, result) self.assertEqual((2, 38), batch.input_ids.shape) self.assertEqual((2, 38), batch.attention_mask.shape) def test_empty_target_text(self): tokenizer = self.perceiver_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) # check if input_ids are returned and no decoder_input_ids self.assertIn("input_ids", batch) self.assertIn("attention_mask", batch) self.assertNotIn("decoder_input_ids", batch) self.assertNotIn("decoder_attention_mask", batch) def test_max_length_integration(self): tokenizer = self.perceiver_tokenizer tgt_text = [ "Summary of the text.", "Another summary.", ] targets = tokenizer( text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK ) self.assertEqual(32, targets["input_ids"].shape[1]) # cannot use default save_and_load_tokenzier test method because tokenzier has no vocab def test_save_and_load_tokenizer(self): # safety check on max_len default value so we are sure the test works tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): self.assertNotEqual(tokenizer.model_max_length, 42) # Now let's start the test tokenizers = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Isolate this from the other tests because we save additional tokens/etc tmpdirname = tempfile.mkdtemp() sample_text = " He is very happy, UNwant\u00E9d,running" before_tokens = tokenizer.encode(sample_text, add_special_tokens=False) tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False) self.assertListEqual(before_tokens, after_tokens) shutil.rmtree(tmpdirname) tokenizers = self.get_tokenizers(model_max_length=42) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): # Isolate this from the other tests because we save additional tokens/etc tmpdirname = tempfile.mkdtemp() sample_text = " He is very happy, UNwant\u00E9d,running" tokenizer.add_tokens(["bim", 
"bambam"]) additional_special_tokens = tokenizer.additional_special_tokens additional_special_tokens.append("new_additional_special_token") tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens}) before_tokens = tokenizer.encode(sample_text, add_special_tokens=False) tokenizer.save_pretrained(tmpdirname) after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname) after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False) self.assertListEqual(before_tokens, after_tokens) self.assertIn("new_additional_special_token", after_tokenizer.additional_special_tokens) self.assertEqual(after_tokenizer.model_max_length, 42) tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43) self.assertEqual(tokenizer.model_max_length, 43) shutil.rmtree(tmpdirname) # There is a conflict between the default value of extra_ids and adding a new special token through additional_special_tokens # We need to add the extra_ids in the list of the arg additional_special_tokens def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self): tokenizer_list = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer())) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer())) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(tmp_dir) with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file: special_tokens_map = json.load(json_file) with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file: tokenizer_config = json.load(json_file) added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125)] special_tokens_map["additional_special_tokens"] = added_tokens_extra_ids + [ "an_additional_special_token" ] tokenizer_config["additional_special_tokens"] = added_tokens_extra_ids + [ "an_additional_special_token" ] with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile: json.dump(special_tokens_map, outfile) with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile: json.dump(tokenizer_config, outfile) # the following checks allow us to verify that our test works as expected, i.e. 
that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files tokenizer_without_change_in_init = tokenizer_class.from_pretrained( tmp_dir, ) self.assertIn( "an_additional_special_token", tokenizer_without_change_in_init.additional_special_tokens ) self.assertEqual( ["an_additional_special_token"], tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids(["an_additional_special_token"]) ), ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained new_added_tokens = added_tokens_extra_ids + [AddedToken("a_new_additional_special_token", lstrip=True)] tokenizer = tokenizer_class.from_pretrained( tmp_dir, additional_special_tokens=new_added_tokens, ) self.assertIn("a_new_additional_special_token", tokenizer.additional_special_tokens) self.assertEqual( ["a_new_additional_special_token"], tokenizer.convert_ids_to_tokens( tokenizer.convert_tokens_to_ids(["a_new_additional_special_token"]) ), ) def test_decode_invalid_byte_id(self): tokenizer = self.perceiver_tokenizer self.assertEqual(tokenizer.decode([178]), "�") # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list def test_pretrained_model_lists(self): pass # tokenizer does not have vocabulary def test_get_vocab(self): pass # inputs cannot be pretokenized since ids depend on whole input string and not just on single characters def test_pretokenized_inputs(self): pass # tests all ids in vocab => vocab doesn't exist so unnecessary to test def test_conversion_reversible(self): pass def test_convert_tokens_to_string_format(self): # The default common tokenizer tests uses invalid tokens for Perceiver that can only accept one-character # strings and special added tokens as tokens tokenizers = self.get_tokenizers(fast=True, do_lower_case=True) for tokenizer in tokenizers: with self.subTest(f"{tokenizer.__class__.__name__}"): tokens = ["[CLS]", "t", "h", "i", "s", " ", "i", "s", " ", "a", " ", "t", "e", "s", "t", "[SEP]"] string = tokenizer.convert_tokens_to_string(tokens) self.assertIsInstance(string, str)
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/efficientnet/image_processing_efficientnet.py
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for EfficientNet.""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL logger = logging.get_logger(__name__) class EfficientNetImageProcessor(BaseImageProcessor): r""" Constructs a EfficientNet image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in `preprocess`. size (`Dict[str, int]` *optional*, defaults to `{"height": 346, "width": 346}`): Size of the image after `resize`. Can be overridden by `size` in `preprocess`. resample (`PILImageResampling` filter, *optional*, defaults to `PILImageResampling.NEAREST`): Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`. do_center_crop (`bool`, *optional*, defaults to `False`): Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image is padded with 0's and then center cropped. Can be overridden by `do_center_crop` in `preprocess`. crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 289, "width": 289}`): Desired output size when applying center-cropping. Can be overridden by `crop_size` in `preprocess`. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. rescale_offset (`bool`, *optional*, defaults to `False`): Whether to rescale the image between [-scale_range, scale_range] instead of [0, scale_range]. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. 
image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. include_top (`bool`, *optional*, defaults to `True`): Whether to rescale the image again. Should be set to True if the inputs are used for image classification. """ model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PIL.Image.NEAREST, do_center_crop: bool = False, crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255, rescale_offset: bool = False, do_rescale: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, include_top: bool = True, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 346, "width": 346} size = get_size_dict(size) crop_size = crop_size if crop_size is not None else {"height": 289, "width": 289} crop_size = get_size_dict(crop_size, param_name="crop_size") self.do_resize = do_resize self.size = size self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.rescale_offset = rescale_offset self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD self.include_top = include_top def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.NEAREST, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])` using the specified resampling filter. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the output image. resample (`PILImageResampling` filter, *optional*, defaults to `PILImageResampling.NEAREST`): Resampling filter to use when resizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}") return resize( image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs ) def center_crop( self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Center crop an image to `(crop_size["height"], crop_size["width"])`. If the input size is smaller than `crop_size` along any edge, the image is padded with 0's and then center cropped. Args: image (`np.ndarray`): Image to center crop. size (`Dict[str, int]`): Size of the output image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The size dictionary must have keys 'height' and 'width'. 
Got {size.keys()}") return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs) def rescale( self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ): """ Rescale an image by a scale factor. image = image * scale. Args: image (`np.ndarray`): Image to rescale. scale (`int` or `float`): Scale to apply to the image. offset (`bool`, *optional*): Whether to scale the image in both negative and positive directions. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ if offset: rescaled_image = (image - 127.5) * scale if data_format is not None: rescaled_image = to_channel_dimension_format(rescaled_image, data_format) rescaled_image = rescaled_image.astype(np.float32) else: rescaled_image = rescale(image, scale=scale, data_format=data_format, **kwargs) return rescale(image, scale=scale, data_format=data_format, **kwargs) def normalize( self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Normalize an image. image = (image - image_mean) / image_std. Args: image (`np.ndarray`): Image to normalize. image_mean (`float` or `List[float]`): Image mean. image_std (`float` or `List[float]`): Image standard deviation. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs) def preprocess( self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, rescale_offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, include_top: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after `resize`. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): PILImageResampling filter to use if resizing the image Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the image after center crop. If one edge the image is smaller than `crop_size`, it will be padded with zeros and then cropped do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. rescale_offset (`bool`, *optional*, defaults to `self.rescale_offset`): Whether to rescale the image between [-scale_range, scale_range] instead of [0, scale_range]. 
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation. include_top (`bool`, *optional*, defaults to `self.include_top`): Rescales the image again for image classification if set to True. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - `None`: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. """ do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor rescale_offset = rescale_offset if rescale_offset is not None else self.rescale_offset do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std include_top = include_top if include_top is not None else self.include_top size = size if size is not None else self.size size = get_size_dict(size) crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name="crop_size") images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True.") if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") # All transformations expect numpy arrays. 
images = [to_numpy_array(image) for image in images] if do_resize: images = [self.resize(image=image, size=size, resample=resample) for image in images] if do_center_crop: images = [self.center_crop(image=image, size=crop_size) for image in images] if do_rescale: images = [self.rescale(image=image, scale=rescale_factor, offset=rescale_offset) for image in images] if do_normalize: images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images] if include_top: images = [self.normalize(image=image, mean=[0, 0, 0], std=image_std) for image in images] images = [to_channel_dimension_format(image, data_format) for image in images] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors)
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Image processor class for EfficientNet.""" from typing import Dict, List, Optional, Union import numpy as np from transformers.utils import is_vision_available from transformers.utils.generic import TensorType from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import logging if is_vision_available(): import PIL logger = logging.get_logger(__name__) class EfficientNetImageProcessor(BaseImageProcessor): r""" Constructs a EfficientNet image processor. Args: do_resize (`bool`, *optional*, defaults to `True`): Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by `do_resize` in `preprocess`. size (`Dict[str, int]` *optional*, defaults to `{"height": 346, "width": 346}`): Size of the image after `resize`. Can be overridden by `size` in `preprocess`. resample (`PILImageResampling` filter, *optional*, defaults to `PILImageResampling.NEAREST`): Resampling filter to use if resizing the image. Can be overridden by `resample` in `preprocess`. do_center_crop (`bool`, *optional*, defaults to `False`): Whether to center crop the image. If the input size is smaller than `crop_size` along any edge, the image is padded with 0's and then center cropped. Can be overridden by `do_center_crop` in `preprocess`. crop_size (`Dict[str, int]`, *optional*, defaults to `{"height": 289, "width": 289}`): Desired output size when applying center-cropping. Can be overridden by `crop_size` in `preprocess`. do_rescale (`bool`, *optional*, defaults to `True`): Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the `do_rescale` parameter in the `preprocess` method. rescale_factor (`int` or `float`, *optional*, defaults to `1/255`): Scale factor to use if rescaling the image. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. rescale_offset (`bool`, *optional*, defaults to `False`): Whether to rescale the image between [-scale_range, scale_range] instead of [0, scale_range]. Can be overridden by the `rescale_factor` parameter in the `preprocess` method. do_normalize (`bool`, *optional*, defaults to `True`): Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess` method. image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_MEAN`): Mean to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method. 
image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_STANDARD_STD`): Standard deviation to use if normalizing the image. This is a float or list of floats the length of the number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method. include_top (`bool`, *optional*, defaults to `True`): Whether to rescale the image again. Should be set to True if the inputs are used for image classification. """ model_input_names = ["pixel_values"] def __init__( self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PIL.Image.NEAREST, do_center_crop: bool = False, crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255, rescale_offset: bool = False, do_rescale: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, include_top: bool = True, **kwargs, ) -> None: super().__init__(**kwargs) size = size if size is not None else {"height": 346, "width": 346} size = get_size_dict(size) crop_size = crop_size if crop_size is not None else {"height": 289, "width": 289} crop_size = get_size_dict(crop_size, param_name="crop_size") self.do_resize = do_resize self.size = size self.resample = resample self.do_center_crop = do_center_crop self.crop_size = crop_size self.do_rescale = do_rescale self.rescale_factor = rescale_factor self.rescale_offset = rescale_offset self.do_normalize = do_normalize self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD self.include_top = include_top def resize( self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.NEAREST, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Resize an image to `(size["height"], size["width"])` using the specified resampling filter. Args: image (`np.ndarray`): Image to resize. size (`Dict[str, int]`): Size of the output image. resample (`PILImageResampling` filter, *optional*, defaults to `PILImageResampling.NEAREST`): Resampling filter to use when resizing the image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}") return resize( image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs ) def center_crop( self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Center crop an image to `(crop_size["height"], crop_size["width"])`. If the input size is smaller than `crop_size` along any edge, the image is padded with 0's and then center cropped. Args: image (`np.ndarray`): Image to center crop. size (`Dict[str, int]`): Size of the output image. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ size = get_size_dict(size) if "height" not in size or "width" not in size: raise ValueError(f"The size dictionary must have keys 'height' and 'width'. 
Got {size.keys()}") return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs) def rescale( self, image: np.ndarray, scale: Union[int, float], offset: bool = True, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ): """ Rescale an image by a scale factor. image = image * scale. Args: image (`np.ndarray`): Image to rescale. scale (`int` or `float`): Scale to apply to the image. offset (`bool`, *optional*): Whether to scale the image in both negative and positive directions. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ if offset: rescaled_image = (image - 127.5) * scale if data_format is not None: rescaled_image = to_channel_dimension_format(rescaled_image, data_format) rescaled_image = rescaled_image.astype(np.float32) else: rescaled_image = rescale(image, scale=scale, data_format=data_format, **kwargs) return rescale(image, scale=scale, data_format=data_format, **kwargs) def normalize( self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs, ) -> np.ndarray: """ Normalize an image. image = (image - image_mean) / image_std. Args: image (`np.ndarray`): Image to normalize. image_mean (`float` or `List[float]`): Image mean. image_std (`float` or `List[float]`): Image standard deviation. data_format (`str` or `ChannelDimension`, *optional*): The channel dimension format of the image. If not provided, it will be the same as the input image. """ return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs) def preprocess( self, images: ImageInput, do_resize: bool = None, size: Dict[str, int] = None, resample=None, do_center_crop: bool = None, crop_size: Dict[str, int] = None, do_rescale: bool = None, rescale_factor: float = None, rescale_offset: bool = None, do_normalize: bool = None, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, include_top: bool = None, return_tensors: Optional[Union[str, TensorType]] = None, data_format: ChannelDimension = ChannelDimension.FIRST, **kwargs, ) -> PIL.Image.Image: """ Preprocess an image or batch of images. Args: images (`ImageInput`): Image to preprocess. do_resize (`bool`, *optional*, defaults to `self.do_resize`): Whether to resize the image. size (`Dict[str, int]`, *optional*, defaults to `self.size`): Size of the image after `resize`. resample (`PILImageResampling`, *optional*, defaults to `self.resample`): PILImageResampling filter to use if resizing the image Only has an effect if `do_resize` is set to `True`. do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`): Whether to center crop the image. crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`): Size of the image after center crop. If one edge the image is smaller than `crop_size`, it will be padded with zeros and then cropped do_rescale (`bool`, *optional*, defaults to `self.do_rescale`): Whether to rescale the image values between [0 - 1]. rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`): Rescale factor to rescale the image by if `do_rescale` is set to `True`. rescale_offset (`bool`, *optional*, defaults to `self.rescale_offset`): Whether to rescale the image between [-scale_range, scale_range] instead of [0, scale_range]. 
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`): Whether to normalize the image. image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`): Image mean. image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`): Image standard deviation. include_top (`bool`, *optional*, defaults to `self.include_top`): Rescales the image again for image classification if set to True. return_tensors (`str` or `TensorType`, *optional*): The type of tensors to return. Can be one of: - `None`: Return a list of `np.ndarray`. - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`. - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`. - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`. - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`. data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`): The channel dimension format for the output image. Can be one of: - `ChannelDimension.FIRST`: image in (num_channels, height, width) format. - `ChannelDimension.LAST`: image in (height, width, num_channels) format. """ do_resize = do_resize if do_resize is not None else self.do_resize resample = resample if resample is not None else self.resample do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop do_rescale = do_rescale if do_rescale is not None else self.do_rescale rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor rescale_offset = rescale_offset if rescale_offset is not None else self.rescale_offset do_normalize = do_normalize if do_normalize is not None else self.do_normalize image_mean = image_mean if image_mean is not None else self.image_mean image_std = image_std if image_std is not None else self.image_std include_top = include_top if include_top is not None else self.include_top size = size if size is not None else self.size size = get_size_dict(size) crop_size = crop_size if crop_size is not None else self.crop_size crop_size = get_size_dict(crop_size, param_name="crop_size") images = make_list_of_images(images) if not valid_images(images): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None or resample is None: raise ValueError("Size and resample must be specified if do_resize is True.") if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True.") if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True.") if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True.") # All transformations expect numpy arrays. 
images = [to_numpy_array(image) for image in images] if do_resize: images = [self.resize(image=image, size=size, resample=resample) for image in images] if do_center_crop: images = [self.center_crop(image=image, size=crop_size) for image in images] if do_rescale: images = [self.rescale(image=image, scale=rescale_factor, offset=rescale_offset) for image in images] if do_normalize: images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images] if include_top: images = [self.normalize(image=image, mean=[0, 0, 0], std=image_std) for image in images] images = [to_channel_dimension_format(image, data_format) for image in images] data = {"pixel_values": images} return BatchFeature(data=data, tensor_type=return_tensors)
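Since the docstrings in the file above describe the `preprocess` API, a hedged usage sketch may help. It assumes a `transformers` build that exports `EfficientNetImageProcessor` and feeds a random dummy image instead of a real photo:

```python
import numpy as np
from transformers import EfficientNetImageProcessor

# Defaults documented above (346x346 resize), plus center cropping to 289x289.
processor = EfficientNetImageProcessor(do_center_crop=True)

# Dummy HxWxC uint8 image standing in for a real picture.
dummy_image = np.random.randint(0, 256, size=(600, 600, 3), dtype=np.uint8)

inputs = processor(images=dummy_image, return_tensors="np")
# Channels-first batch: (batch_size, num_channels, height, width)
print(inputs["pixel_values"].shape)  # expected (1, 3, 289, 289) with these settings
```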
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/vit_mae/test_modeling_vit_mae.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch ViTMAE model. """ import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTFeatureExtractor class ViTMAEModelTester: def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.mask_ratio = mask_ratio self.scope = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1))) def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return ViTMAEConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, 
initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, ) def create_and_check_model(self, config, pixel_values, labels): model = ViTMAEModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_pretraining(self, config, pixel_values, labels): model = ViTMAEForPreTraining(config) model.to(torch_device) model.eval() result = model(pixel_values) num_patches = (self.image_size // self.patch_size) ** 2 expected_num_channels = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels)) # test greyscale images config.num_channels = 1 model = ViTMAEForPreTraining(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) expected_num_channels = self.patch_size**2 self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as ViTMAE does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {} test_pruning = False test_torchscript = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = ViTMAEModelTester(self) self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="ViTMAE does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) # overwrite from common since ViTMAEForPretraining has random masking, we need to fix the noise # to generate masks during test def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict): # make masks reproducible np.random.seed(2) num_patches = 
int((pt_model.config.image_size // pt_model.config.patch_size) ** 2) noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) pt_noise = torch.from_numpy(noise) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument pt_inputs_dict["noise"] = pt_noise super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict) def test_save_load(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() # make random mask reproducible torch.manual_seed(2) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) out_2 = outputs[0].cpu().numpy() out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname) model.to(torch_device) # make random mask reproducible torch.manual_seed(2) with torch.no_grad(): after_outputs = model(**self._prepare_for_class(inputs_dict, model_class)) # Make sure we don't have nans out_1 = after_outputs[0].cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def test_determinism(self): pass @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def test_save_load_fast_init_from_base(self): pass @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. 
See test_save_load""") def test_model_outputs_equivalence(self): pass @slow def test_model_from_pretrained(self): for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ViTMAEModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class ViTMAEModelIntegrationTest(unittest.TestCase): @cached_property def default_feature_extractor(self): return ViTFeatureExtractor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None @slow def test_inference_for_pretraining(self): # make random mask reproducible across the PT and TF model np.random.seed(2) model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device) feature_extractor = self.default_feature_extractor image = prepare_img() inputs = feature_extractor(images=image, return_tensors="pt").to(torch_device) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) vit_mae_config = ViTMAEConfig() num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2) noise = np.random.uniform(size=(1, num_patches)) # forward pass with torch.no_grad(): outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device)) # verify the logits expected_shape = torch.Size((1, 196, 768)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch ViTMAE model. """ import inspect import math import tempfile import unittest import numpy as np from transformers import ViTMAEConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMAEForPreTraining, ViTMAEModel from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTFeatureExtractor class ViTMAEModelTester: def __init__( self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None, ): self.parent = parent self.batch_size = batch_size self.image_size = image_size self.patch_size = patch_size self.num_channels = num_channels self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.mask_ratio = mask_ratio self.scope = scope # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above # (we add 1 for the [CLS] token) num_patches = (image_size // patch_size) ** 2 self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1))) def prepare_config_and_inputs(self): pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) labels = None if self.use_labels: labels = ids_tensor([self.batch_size], self.type_sequence_label_size) config = self.get_config() return config, pixel_values, labels def get_config(self): return ViTMAEConfig( image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, 
initializer_range=self.initializer_range, mask_ratio=self.mask_ratio, ) def create_and_check_model(self, config, pixel_values, labels): model = ViTMAEModel(config=config) model.to(torch_device) model.eval() result = model(pixel_values) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_pretraining(self, config, pixel_values, labels): model = ViTMAEForPreTraining(config) model.to(torch_device) model.eval() result = model(pixel_values) num_patches = (self.image_size // self.patch_size) ** 2 expected_num_channels = self.patch_size**2 * self.num_channels self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels)) # test greyscale images config.num_channels = 1 model = ViTMAEForPreTraining(config) model.to(torch_device) model.eval() pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) result = model(pixel_values) expected_num_channels = self.patch_size**2 self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() config, pixel_values, labels = config_and_inputs inputs_dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase): """ Here we also overwrite some of the tests of test_modeling_common.py, as ViTMAE does not use input_ids, inputs_embeds, attention_mask and seq_length. """ all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else () pipeline_model_mapping = {"feature-extraction": ViTMAEModel} if is_torch_available() else {} test_pruning = False test_torchscript = False test_resize_embeddings = False test_head_masking = False def setUp(self): self.model_tester = ViTMAEModelTester(self) self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() @unittest.skip(reason="ViTMAE does not use inputs_embeds") def test_inputs_embeds(self): pass def test_model_common_attributes(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Module)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.Linear)) def test_forward_signature(self): config, _ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) signature = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic arg_names = [*signature.parameters.keys()] expected_arg_names = ["pixel_values"] self.assertListEqual(arg_names[:1], expected_arg_names) def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*config_and_inputs) # overwrite from common since ViTMAEForPretraining has random masking, we need to fix the noise # to generate masks during test def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict): # make masks reproducible np.random.seed(2) num_patches = 
int((pt_model.config.image_size // pt_model.config.patch_size) ** 2) noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches)) pt_noise = torch.from_numpy(noise) # Add `noise` argument. # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument pt_inputs_dict["noise"] = pt_noise super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict) def test_save_load(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) model.to(torch_device) model.eval() # make random mask reproducible torch.manual_seed(2) with torch.no_grad(): outputs = model(**self._prepare_for_class(inputs_dict, model_class)) out_2 = outputs[0].cpu().numpy() out_2[np.isnan(out_2)] = 0 with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname) model.to(torch_device) # make random mask reproducible torch.manual_seed(2) with torch.no_grad(): after_outputs = model(**self._prepare_for_class(inputs_dict, model_class)) # Make sure we don't have nans out_1 = after_outputs[0].cpu().numpy() out_1[np.isnan(out_1)] = 0 max_diff = np.amax(np.abs(out_1 - out_2)) self.assertLessEqual(max_diff, 1e-5) @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def test_determinism(self): pass @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def test_save_load_fast_init_from_base(self): pass @unittest.skip( reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load to get deterministic results.""" ) def test_save_load_fast_init_to_base(self): pass @unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. 
See test_save_load""") def test_model_outputs_equivalence(self): pass @slow def test_model_from_pretrained(self): for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = ViTMAEModel.from_pretrained(model_name) self.assertIsNotNone(model) # We will verify our results on an image of cute cats def prepare_img(): image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class ViTMAEModelIntegrationTest(unittest.TestCase): @cached_property def default_feature_extractor(self): return ViTFeatureExtractor.from_pretrained("facebook/vit-mae-base") if is_vision_available() else None @slow def test_inference_for_pretraining(self): # make random mask reproducible across the PT and TF model np.random.seed(2) model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device) feature_extractor = self.default_feature_extractor image = prepare_img() inputs = feature_extractor(images=image, return_tensors="pt").to(torch_device) # prepare a noise vector that will be also used for testing the TF model # (this way we can ensure that the PT and TF models operate on the same inputs) vit_mae_config = ViTMAEConfig() num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2) noise = np.random.uniform(size=(1, num_patches)) # forward pass with torch.no_grad(): outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device)) # verify the logits expected_shape = torch.Size((1, 196, 768)) self.assertEqual(outputs.logits.shape, expected_shape) expected_slice = torch.tensor( [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/layoutxlm/tokenization_layoutxlm_fast.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License """ Tokenization classes for LayoutXLM model.""" import os from shutil import copyfile from typing import Dict, List, Optional, Tuple, Union from ...tokenization_utils import AddedToken from ...tokenization_utils_base import ( BatchEncoding, EncodedInput, PreTokenizedInput, TextInput, TextInputPair, TruncationStrategy, ) from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, TensorType, add_end_docstrings, is_sentencepiece_available, logging from ..xlm_roberta.tokenization_xlm_roberta_fast import ( PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, PRETRAINED_VOCAB_FILES_MAP, VOCAB_FILES_NAMES, ) if is_sentencepiece_available(): from .tokenization_layoutxlm import LayoutXLMTokenizer else: LayoutXLMTokenizer = None logger = logging.get_logger(__name__) LAYOUTXLM_ENCODE_KWARGS_DOCSTRING = r""" add_special_tokens (`bool`, *optional*, defaults to `True`): Whether or not to encode the sequences with the special tokens relative to their model. padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. 
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. stride (`int`, *optional*, defaults to 0): If set to a number along with `max_length`, the overflowing tokens returned when `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_tensors (`str` or [`~file_utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_token_type_ids (`bool`, *optional*): Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are token type IDs?](../glossary#token-type-ids) return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) return_overflowing_tokens (`bool`, *optional*, defaults to `False`): Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead of returning overflowing tokens. return_special_tokens_mask (`bool`, *optional*, defaults to `False`): Whether or not to return special tokens mask information. return_offsets_mapping (`bool`, *optional*, defaults to `False`): Whether or not to return `(char_start, char_end)` for each token. This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using Python's tokenizer, this method will raise `NotImplementedError`. return_length (`bool`, *optional*, defaults to `False`): Whether or not to return the lengths of the encoded inputs. verbose (`bool`, *optional*, defaults to `True`): Whether or not to print more information and warnings. **kwargs: passed to the `self.tokenize()` method Return: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. [What are input IDs?](../glossary#input-ids) - **bbox** -- List of bounding boxes to be fed to a model. - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or if *"token_type_ids"* is in `self.model_input_names`). [What are token type IDs?](../glossary#token-type-ids) - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`). [What are attention masks?](../glossary#attention-mask) - **labels** -- List of labels to be fed to a model. (when `word_labels` is specified). 
- **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and `return_overflowing_tokens=True`). - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and `return_overflowing_tokens=True`). - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`). - **length** -- The length of the inputs (when `return_length=True`). """ class LayoutXLMTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" LayoutXLM tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`): The bounding box to use for the special [CLS] token. sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`): The bounding box to use for the special [SEP] token. pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`): The bounding box to use for the special [PAD] token. pad_token_label (`int`, *optional*, defaults to -100): The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's CrossEntropyLoss. only_label_first_subword (`bool`, *optional*, defaults to `True`): Whether or not to only label the first subword, in case word labels are provided. 
additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`): Additional special tokens used by the tokenizer. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = LayoutXLMTokenizer def __init__( self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", cls_token_box=[0, 0, 0, 0], sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, **kwargs, ): # Mask token behave like a normal word, i.e. include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token super().__init__( vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, cls_token_box=cls_token_box, sep_token_box=sep_token_box, pad_token_box=pad_token_box, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, **kwargs, ) self.vocab_file = vocab_file self.can_save_slow_tokenizer = False if not self.vocab_file else True # additional properties self.cls_token_box = cls_token_box self.sep_token_box = sep_token_box self.pad_token_box = pad_token_box self.pad_token_label = pad_token_label self.only_label_first_subword = only_label_first_subword @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING) def __call__( self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words). text_pair (`List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence should be a list of strings (pretokenized string). boxes (`List[List[int]]`, `List[List[List[int]]]`): Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale. word_labels (`List[int]`, `List[List[int]]`, *optional*): Word-level integer labels (for token classification tasks such as FUNSD, CORD). 
""" # Input type checking for clearer error def _is_valid_text_input(t): if isinstance(t, str): # Strings are fine return True elif isinstance(t, (list, tuple)): # List are fine as long as they are... if len(t) == 0: # ... empty return True elif isinstance(t[0], str): # ... list of strings return True elif isinstance(t[0], (list, tuple)): # ... list with an empty list or with a list of strings return len(t[0]) == 0 or isinstance(t[0][0], str) else: return False else: return False if text_pair is not None: # in case text + text_pair are provided, text = questions, text_pair = words if not _is_valid_text_input(text): raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ") if not isinstance(text_pair, (list, tuple)): raise ValueError( "words must of type `List[str]` (single pretokenized example), " "or `List[List[str]]` (batch of pretokenized examples)." ) else: # in case only text is provided => must be words if not isinstance(text, (list, tuple)): raise ValueError( "Words must of type `List[str]` (single pretokenized example), " "or `List[List[str]]` (batch of pretokenized examples)." ) if text_pair is not None: is_batched = isinstance(text, (list, tuple)) else: is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)) words = text if text_pair is None else text_pair if boxes is None: raise ValueError("You must provide corresponding bounding boxes") if is_batched: if len(words) != len(boxes): raise ValueError("You must provide words and boxes for an equal amount of examples") for words_example, boxes_example in zip(words, boxes): if len(words_example) != len(boxes_example): raise ValueError("You must provide as many words as there are bounding boxes") else: if len(words) != len(boxes): raise ValueError("You must provide as many words as there are bounding boxes") if is_batched: if text_pair is not None and len(text) != len(text_pair): raise ValueError( f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:" f" {len(text_pair)}." 
) batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text is_pair = bool(text_pair is not None) return self.batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) else: return self.encode_plus( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]: batched_input = [(text, pair)] if pair else [text] encodings = self._tokenizer.encode_batch( batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs ) return encodings[0].tokens def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], ], is_pair: bool = None, boxes: Optional[List[List[List[int]]]] = None, word_labels: Optional[List[List[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: if not isinstance(batch_text_or_text_pairs, list): raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})") # Set the truncation and padding strategy and restore the initial configuration self.set_truncation_and_padding( padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, ) if is_pair: batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs] encodings = self._tokenizer.encode_batch( batch_text_or_text_pairs, add_special_tokens=add_special_tokens, is_pretokenized=True, # we set this to True as LayoutLMv2 always expects pretokenized inputs ) # Convert encoding to dict # `Tokens` has type: Tuple[ # List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]], # List[EncodingFast] # ] # with nested dimensions corresponding to batch, overflows, sequence length tokens_and_encodings = [ self._convert_encoding( encoding=encoding, 
return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=True if word_labels is not None else return_offsets_mapping, # we use offsets to create the labels return_length=return_length, verbose=verbose, ) for encoding in encodings ] # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length) # (we say ~ because the number of overflow varies with the example in the batch) # # To match each overflowing sample with the original sample in the batch # we add an overflow_to_sample_mapping array (see below) sanitized_tokens = {} for key in tokens_and_encodings[0][0].keys(): stack = [e for item, _ in tokens_and_encodings for e in item[key]] sanitized_tokens[key] = stack sanitized_encodings = [e for _, item in tokens_and_encodings for e in item] # If returning overflowing tokens, we need to return a mapping # from the batch idx to the original sample if return_overflowing_tokens: overflow_to_sample_mapping = [] for i, (toks, _) in enumerate(tokens_and_encodings): overflow_to_sample_mapping += [i] * len(toks["input_ids"]) sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping for input_ids in sanitized_tokens["input_ids"]: self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose) # create the token boxes token_boxes = [] for batch_index in range(len(sanitized_tokens["input_ids"])): if return_overflowing_tokens: original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index] else: original_index = batch_index token_boxes_example = [] for id, sequence_id, word_id in zip( sanitized_tokens["input_ids"][batch_index], sanitized_encodings[batch_index].sequence_ids, sanitized_encodings[batch_index].word_ids, ): if word_id is not None: if is_pair and sequence_id == 0: token_boxes_example.append(self.pad_token_box) else: token_boxes_example.append(boxes[original_index][word_id]) else: if id == self.cls_token_id: token_boxes_example.append(self.cls_token_box) elif id == self.sep_token_id: token_boxes_example.append(self.sep_token_box) elif id == self.pad_token_id: token_boxes_example.append(self.pad_token_box) else: raise ValueError("Id not recognized") token_boxes.append(token_boxes_example) sanitized_tokens["bbox"] = token_boxes # optionally, create the labels if word_labels is not None: labels = [] for batch_index in range(len(sanitized_tokens["input_ids"])): if return_overflowing_tokens: original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index] else: original_index = batch_index labels_example = [] for id, offset, word_id in zip( sanitized_tokens["input_ids"][batch_index], sanitized_tokens["offset_mapping"][batch_index], sanitized_encodings[batch_index].word_ids, ): if word_id is not None: if self.only_label_first_subword: if offset[0] == 0: # Use the real label id for the first token of the word, and padding ids for the remaining tokens labels_example.append(word_labels[original_index][word_id]) else: labels_example.append(self.pad_token_label) else: labels_example.append(word_labels[original_index][word_id]) else: labels_example.append(self.pad_token_label) labels.append(labels_example) sanitized_tokens["labels"] = labels # finally, remove offsets if the user didn't want them if not return_offsets_mapping: del 
sanitized_tokens["offset_mapping"] return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors) def _encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: # make it a batched input # 2 options: # 1) only text, in case text must be a list of str # 2) text + text_pair, in which case text = str and text_pair a list of str batched_input = [(text, text_pair)] if text_pair else [text] batched_boxes = [boxes] batched_word_labels = [word_labels] if word_labels is not None else None batched_output = self._batch_encode_plus( batched_input, is_pair=bool(text_pair is not None), boxes=batched_boxes, word_labels=batched_word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) # Return tensor is None, then we can remove the leading batch axis # Overflowing tokens are returned as a batch of output so we keep them in this case if return_tensors is None and not return_overflowing_tokens: batched_output = BatchEncoding( { key: value[0] if len(value) > 0 and isinstance(value[0], list) else value for key, value in batched_output.items() }, batched_output.encodings, ) self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose) return batched_output def _pad( self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ) -> dict: """ Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. 
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) """ # Load from model defaults if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names required_input = encoded_inputs[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: max_length = len(required_input) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length # Initialize attention mask if not present. if return_attention_mask and "attention_mask" not in encoded_inputs: encoded_inputs["attention_mask"] = [1] * len(required_input) if needs_to_be_padded: difference = max_length - len(required_input) if self.padding_side == "right": if return_attention_mask: encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = ( encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference ) if "bbox" in encoded_inputs: encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference if "labels" in encoded_inputs: encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference elif self.padding_side == "left": if return_attention_mask: encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[ "token_type_ids" ] if "bbox" in encoded_inputs: encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"] if "labels" in encoded_inputs: encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"] if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input else: raise ValueError("Invalid padding strategy:" + str(self.padding_side)) return encoded_inputs def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM-RoBERTa sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
""" if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory.") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License """ Tokenization classes for LayoutXLM model.""" import os from shutil import copyfile from typing import Dict, List, Optional, Tuple, Union from ...tokenization_utils import AddedToken from ...tokenization_utils_base import ( BatchEncoding, EncodedInput, PreTokenizedInput, TextInput, TextInputPair, TruncationStrategy, ) from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import PaddingStrategy, TensorType, add_end_docstrings, is_sentencepiece_available, logging from ..xlm_roberta.tokenization_xlm_roberta_fast import ( PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES, PRETRAINED_VOCAB_FILES_MAP, VOCAB_FILES_NAMES, ) if is_sentencepiece_available(): from .tokenization_layoutxlm import LayoutXLMTokenizer else: LayoutXLMTokenizer = None logger = logging.get_logger(__name__) LAYOUTXLM_ENCODE_KWARGS_DOCSTRING = r""" add_special_tokens (`bool`, *optional*, defaults to `True`): Whether or not to encode the sequences with the special tokens relative to their model. padding (`bool`, `str` or [`~file_utils.PaddingStrategy`], *optional*, defaults to `False`): Activates and controls padding. Accepts the following values: - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`): Activates and controls truncation. Accepts the following values: - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will truncate token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch of pairs) is provided. - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the first sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum acceptable input length for the model if that argument is not provided. This will only truncate the second sequence of a pair if a pair of sequences (or a batch of pairs) is provided. - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths greater than the model maximum admissible input size). max_length (`int`, *optional*): Controls the maximum length to use by one of the truncation/padding parameters. 
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is required by one of the truncation/padding parameters. If the model has no specific maximum input length (like XLNet) truncation/padding to a maximum length will be deactivated. stride (`int`, *optional*, defaults to 0): If set to a number along with `max_length`, the overflowing tokens returned when `return_overflowing_tokens=True` will contain some tokens from the end of the truncated sequence returned to provide some overlap between truncated and overflowing sequences. The value of this argument defines the number of overlapping tokens. pad_to_multiple_of (`int`, *optional*): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_tensors (`str` or [`~file_utils.TensorType`], *optional*): If set, will return tensors instead of list of python integers. Acceptable values are: - `'tf'`: Return TensorFlow `tf.constant` objects. - `'pt'`: Return PyTorch `torch.Tensor` objects. - `'np'`: Return Numpy `np.ndarray` objects. return_token_type_ids (`bool`, *optional*): Whether to return token type IDs. If left to the default, will return the token type IDs according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are token type IDs?](../glossary#token-type-ids) return_attention_mask (`bool`, *optional*): Whether to return the attention mask. If left to the default, will return the attention mask according to the specific tokenizer's default, defined by the `return_outputs` attribute. [What are attention masks?](../glossary#attention-mask) return_overflowing_tokens (`bool`, *optional*, defaults to `False`): Whether or not to return overflowing token sequences. If a pair of sequences of input ids (or a batch of pairs) is provided with `truncation_strategy = longest_first` or `True`, an error is raised instead of returning overflowing tokens. return_special_tokens_mask (`bool`, *optional*, defaults to `False`): Whether or not to return special tokens mask information. return_offsets_mapping (`bool`, *optional*, defaults to `False`): Whether or not to return `(char_start, char_end)` for each token. This is only available on fast tokenizers inheriting from [`PreTrainedTokenizerFast`], if using Python's tokenizer, this method will raise `NotImplementedError`. return_length (`bool`, *optional*, defaults to `False`): Whether or not to return the lengths of the encoded inputs. verbose (`bool`, *optional*, defaults to `True`): Whether or not to print more information and warnings. **kwargs: passed to the `self.tokenize()` method Return: [`BatchEncoding`]: A [`BatchEncoding`] with the following fields: - **input_ids** -- List of token ids to be fed to a model. [What are input IDs?](../glossary#input-ids) - **bbox** -- List of bounding boxes to be fed to a model. - **token_type_ids** -- List of token type ids to be fed to a model (when `return_token_type_ids=True` or if *"token_type_ids"* is in `self.model_input_names`). [What are token type IDs?](../glossary#token-type-ids) - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names`). [What are attention masks?](../glossary#attention-mask) - **labels** -- List of labels to be fed to a model. (when `word_labels` is specified). 
- **overflowing_tokens** -- List of overflowing tokens sequences (when a `max_length` is specified and `return_overflowing_tokens=True`). - **num_truncated_tokens** -- Number of tokens truncated (when a `max_length` is specified and `return_overflowing_tokens=True`). - **special_tokens_mask** -- List of 0s and 1s, with 1 specifying added special tokens and 0 specifying regular sequence tokens (when `add_special_tokens=True` and `return_special_tokens_mask=True`). - **length** -- The length of the inputs (when `return_length=True`). """ class LayoutXLMTokenizerFast(PreTrainedTokenizerFast): """ Construct a "fast" LayoutXLM tokenizer (backed by HuggingFace's *tokenizers* library). Adapted from [`RobertaTokenizer`] and [`XLNetTokenizer`]. Based on [BPE](https://huggingface.co/docs/tokenizers/python/latest/components.html?highlight=BPE#models). This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. cls_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`): The bounding box to use for the special [CLS] token. sep_token_box (`List[int]`, *optional*, defaults to `[1000, 1000, 1000, 1000]`): The bounding box to use for the special [SEP] token. pad_token_box (`List[int]`, *optional*, defaults to `[0, 0, 0, 0]`): The bounding box to use for the special [PAD] token. pad_token_label (`int`, *optional*, defaults to -100): The label to use for padding tokens. Defaults to -100, which is the `ignore_index` of PyTorch's CrossEntropyLoss. only_label_first_subword (`bool`, *optional*, defaults to `True`): Whether or not to only label the first subword, in case word labels are provided. 
additional_special_tokens (`List[str]`, *optional*, defaults to `["<s>NOTUSED", "</s>NOTUSED"]`): Additional special tokens used by the tokenizer. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] slow_tokenizer_class = LayoutXLMTokenizer def __init__( self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", cls_token_box=[0, 0, 0, 0], sep_token_box=[1000, 1000, 1000, 1000], pad_token_box=[0, 0, 0, 0], pad_token_label=-100, only_label_first_subword=True, **kwargs, ): # Mask token behave like a normal word, i.e. include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token super().__init__( vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, cls_token_box=cls_token_box, sep_token_box=sep_token_box, pad_token_box=pad_token_box, pad_token_label=pad_token_label, only_label_first_subword=only_label_first_subword, **kwargs, ) self.vocab_file = vocab_file self.can_save_slow_tokenizer = False if not self.vocab_file else True # additional properties self.cls_token_box = cls_token_box self.sep_token_box = sep_token_box self.pad_token_box = pad_token_box self.pad_token_label = pad_token_label self.only_label_first_subword = only_label_first_subword @add_end_docstrings(LAYOUTXLM_ENCODE_KWARGS_DOCSTRING) def __call__( self, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]], text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[Union[str, TensorType]] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: """ Main method to tokenize and prepare for the model one or several sequence(s) or one or several pair(s) of sequences with word-level normalized bounding boxes and optional labels. Args: text (`str`, `List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence can be a string, a list of strings (words of a single example or questions of a batch of examples) or a list of list of strings (batch of words). text_pair (`List[str]`, `List[List[str]]`): The sequence or batch of sequences to be encoded. Each sequence should be a list of strings (pretokenized string). boxes (`List[List[int]]`, `List[List[List[int]]]`): Word-level bounding boxes. Each bounding box should be normalized to be on a 0-1000 scale. word_labels (`List[int]`, `List[List[int]]`, *optional*): Word-level integer labels (for token classification tasks such as FUNSD, CORD). 
""" # Input type checking for clearer error def _is_valid_text_input(t): if isinstance(t, str): # Strings are fine return True elif isinstance(t, (list, tuple)): # List are fine as long as they are... if len(t) == 0: # ... empty return True elif isinstance(t[0], str): # ... list of strings return True elif isinstance(t[0], (list, tuple)): # ... list with an empty list or with a list of strings return len(t[0]) == 0 or isinstance(t[0][0], str) else: return False else: return False if text_pair is not None: # in case text + text_pair are provided, text = questions, text_pair = words if not _is_valid_text_input(text): raise ValueError("text input must of type `str` (single example) or `List[str]` (batch of examples). ") if not isinstance(text_pair, (list, tuple)): raise ValueError( "words must of type `List[str]` (single pretokenized example), " "or `List[List[str]]` (batch of pretokenized examples)." ) else: # in case only text is provided => must be words if not isinstance(text, (list, tuple)): raise ValueError( "Words must of type `List[str]` (single pretokenized example), " "or `List[List[str]]` (batch of pretokenized examples)." ) if text_pair is not None: is_batched = isinstance(text, (list, tuple)) else: is_batched = isinstance(text, (list, tuple)) and text and isinstance(text[0], (list, tuple)) words = text if text_pair is None else text_pair if boxes is None: raise ValueError("You must provide corresponding bounding boxes") if is_batched: if len(words) != len(boxes): raise ValueError("You must provide words and boxes for an equal amount of examples") for words_example, boxes_example in zip(words, boxes): if len(words_example) != len(boxes_example): raise ValueError("You must provide as many words as there are bounding boxes") else: if len(words) != len(boxes): raise ValueError("You must provide as many words as there are bounding boxes") if is_batched: if text_pair is not None and len(text) != len(text_pair): raise ValueError( f"batch length of `text`: {len(text)} does not match batch length of `text_pair`:" f" {len(text_pair)}." 
) batch_text_or_text_pairs = list(zip(text, text_pair)) if text_pair is not None else text is_pair = bool(text_pair is not None) return self.batch_encode_plus( batch_text_or_text_pairs=batch_text_or_text_pairs, is_pair=is_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) else: return self.encode_plus( text=text, text_pair=text_pair, boxes=boxes, word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) def tokenize(self, text: str, pair: Optional[str] = None, add_special_tokens: bool = False, **kwargs) -> List[str]: batched_input = [(text, pair)] if pair else [text] encodings = self._tokenizer.encode_batch( batched_input, add_special_tokens=add_special_tokens, is_pretokenized=False, **kwargs ) return encodings[0].tokens def _batch_encode_plus( self, batch_text_or_text_pairs: Union[ List[TextInput], List[TextInputPair], List[PreTokenizedInput], ], is_pair: bool = None, boxes: Optional[List[List[List[int]]]] = None, word_labels: Optional[List[List[int]]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[str] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: if not isinstance(batch_text_or_text_pairs, list): raise TypeError(f"batch_text_or_text_pairs has to be a list (got {type(batch_text_or_text_pairs)})") # Set the truncation and padding strategy and restore the initial configuration self.set_truncation_and_padding( padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, ) if is_pair: batch_text_or_text_pairs = [(text.split(), text_pair) for text, text_pair in batch_text_or_text_pairs] encodings = self._tokenizer.encode_batch( batch_text_or_text_pairs, add_special_tokens=add_special_tokens, is_pretokenized=True, # we set this to True as LayoutLMv2 always expects pretokenized inputs ) # Convert encoding to dict # `Tokens` has type: Tuple[ # List[Dict[str, List[List[int]]]] or List[Dict[str, 2D-Tensor]], # List[EncodingFast] # ] # with nested dimensions corresponding to batch, overflows, sequence length tokens_and_encodings = [ self._convert_encoding( encoding=encoding, 
return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=True if word_labels is not None else return_offsets_mapping, # we use offsets to create the labels return_length=return_length, verbose=verbose, ) for encoding in encodings ] # Convert the output to have dict[list] from list[dict] and remove the additional overflows dimension # From (variable) shape (batch, overflows, sequence length) to ~ (batch * overflows, sequence length) # (we say ~ because the number of overflow varies with the example in the batch) # # To match each overflowing sample with the original sample in the batch # we add an overflow_to_sample_mapping array (see below) sanitized_tokens = {} for key in tokens_and_encodings[0][0].keys(): stack = [e for item, _ in tokens_and_encodings for e in item[key]] sanitized_tokens[key] = stack sanitized_encodings = [e for _, item in tokens_and_encodings for e in item] # If returning overflowing tokens, we need to return a mapping # from the batch idx to the original sample if return_overflowing_tokens: overflow_to_sample_mapping = [] for i, (toks, _) in enumerate(tokens_and_encodings): overflow_to_sample_mapping += [i] * len(toks["input_ids"]) sanitized_tokens["overflow_to_sample_mapping"] = overflow_to_sample_mapping for input_ids in sanitized_tokens["input_ids"]: self._eventual_warn_about_too_long_sequence(input_ids, max_length, verbose) # create the token boxes token_boxes = [] for batch_index in range(len(sanitized_tokens["input_ids"])): if return_overflowing_tokens: original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index] else: original_index = batch_index token_boxes_example = [] for id, sequence_id, word_id in zip( sanitized_tokens["input_ids"][batch_index], sanitized_encodings[batch_index].sequence_ids, sanitized_encodings[batch_index].word_ids, ): if word_id is not None: if is_pair and sequence_id == 0: token_boxes_example.append(self.pad_token_box) else: token_boxes_example.append(boxes[original_index][word_id]) else: if id == self.cls_token_id: token_boxes_example.append(self.cls_token_box) elif id == self.sep_token_id: token_boxes_example.append(self.sep_token_box) elif id == self.pad_token_id: token_boxes_example.append(self.pad_token_box) else: raise ValueError("Id not recognized") token_boxes.append(token_boxes_example) sanitized_tokens["bbox"] = token_boxes # optionally, create the labels if word_labels is not None: labels = [] for batch_index in range(len(sanitized_tokens["input_ids"])): if return_overflowing_tokens: original_index = sanitized_tokens["overflow_to_sample_mapping"][batch_index] else: original_index = batch_index labels_example = [] for id, offset, word_id in zip( sanitized_tokens["input_ids"][batch_index], sanitized_tokens["offset_mapping"][batch_index], sanitized_encodings[batch_index].word_ids, ): if word_id is not None: if self.only_label_first_subword: if offset[0] == 0: # Use the real label id for the first token of the word, and padding ids for the remaining tokens labels_example.append(word_labels[original_index][word_id]) else: labels_example.append(self.pad_token_label) else: labels_example.append(word_labels[original_index][word_id]) else: labels_example.append(self.pad_token_label) labels.append(labels_example) sanitized_tokens["labels"] = labels # finally, remove offsets if the user didn't want them if not return_offsets_mapping: del 
sanitized_tokens["offset_mapping"] return BatchEncoding(sanitized_tokens, sanitized_encodings, tensor_type=return_tensors) def _encode_plus( self, text: Union[TextInput, PreTokenizedInput], text_pair: Optional[PreTokenizedInput] = None, boxes: Optional[List[List[int]]] = None, word_labels: Optional[List[int]] = None, add_special_tokens: bool = True, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, truncation_strategy: TruncationStrategy = TruncationStrategy.DO_NOT_TRUNCATE, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_tensors: Optional[bool] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, **kwargs, ) -> BatchEncoding: # make it a batched input # 2 options: # 1) only text, in case text must be a list of str # 2) text + text_pair, in which case text = str and text_pair a list of str batched_input = [(text, text_pair)] if text_pair else [text] batched_boxes = [boxes] batched_word_labels = [word_labels] if word_labels is not None else None batched_output = self._batch_encode_plus( batched_input, is_pair=bool(text_pair is not None), boxes=batched_boxes, word_labels=batched_word_labels, add_special_tokens=add_special_tokens, padding_strategy=padding_strategy, truncation_strategy=truncation_strategy, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_tensors=return_tensors, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, **kwargs, ) # Return tensor is None, then we can remove the leading batch axis # Overflowing tokens are returned as a batch of output so we keep them in this case if return_tensors is None and not return_overflowing_tokens: batched_output = BatchEncoding( { key: value[0] if len(value) > 0 and isinstance(value[0], list) else value for key, value in batched_output.items() }, batched_output.encodings, ) self._eventual_warn_about_too_long_sequence(batched_output["input_ids"], max_length, verbose) return batched_output def _pad( self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ) -> dict: """ Pad encoded inputs (on left/right and up to predefined length or max length in the batch) Args: encoded_inputs: Dictionary of tokenized inputs (`List[int]`) or batch of tokenized inputs (`List[List[int]]`). max_length: maximum length of the returned list and optionally padding length (see below). Will truncate by taking into account the special tokens. padding_strategy: PaddingStrategy to use for padding. - PaddingStrategy.LONGEST Pad to the longest sequence in the batch - PaddingStrategy.MAX_LENGTH: Pad to the max length (default) - PaddingStrategy.DO_NOT_PAD: Do not pad The tokenizer padding sides are defined in self.padding_side: - 'left': pads on the left of the sequences - 'right': pads on the right of the sequences pad_to_multiple_of: (optional) Integer if set will pad the sequence to a multiple of the provided value. 
This is especially useful to enable the use of Tensor Core on NVIDIA hardware with compute capability `>= 7.5` (Volta). return_attention_mask: (optional) Set to False to avoid returning attention mask (default: set to model specifics) """ # Load from model defaults if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names required_input = encoded_inputs[self.model_input_names[0]] if padding_strategy == PaddingStrategy.LONGEST: max_length = len(required_input) if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0): max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) != max_length # Initialize attention mask if not present. if return_attention_mask and "attention_mask" not in encoded_inputs: encoded_inputs["attention_mask"] = [1] * len(required_input) if needs_to_be_padded: difference = max_length - len(required_input) if self.padding_side == "right": if return_attention_mask: encoded_inputs["attention_mask"] = encoded_inputs["attention_mask"] + [0] * difference if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = ( encoded_inputs["token_type_ids"] + [self.pad_token_type_id] * difference ) if "bbox" in encoded_inputs: encoded_inputs["bbox"] = encoded_inputs["bbox"] + [self.pad_token_box] * difference if "labels" in encoded_inputs: encoded_inputs["labels"] = encoded_inputs["labels"] + [self.pad_token_label] * difference if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = encoded_inputs["special_tokens_mask"] + [1] * difference encoded_inputs[self.model_input_names[0]] = required_input + [self.pad_token_id] * difference elif self.padding_side == "left": if return_attention_mask: encoded_inputs["attention_mask"] = [0] * difference + encoded_inputs["attention_mask"] if "token_type_ids" in encoded_inputs: encoded_inputs["token_type_ids"] = [self.pad_token_type_id] * difference + encoded_inputs[ "token_type_ids" ] if "bbox" in encoded_inputs: encoded_inputs["bbox"] = [self.pad_token_box] * difference + encoded_inputs["bbox"] if "labels" in encoded_inputs: encoded_inputs["labels"] = [self.pad_token_label] * difference + encoded_inputs["labels"] if "special_tokens_mask" in encoded_inputs: encoded_inputs["special_tokens_mask"] = [1] * difference + encoded_inputs["special_tokens_mask"] encoded_inputs[self.model_input_names[0]] = [self.pad_token_id] * difference + required_input else: raise ValueError("Invalid padding strategy:" + str(self.padding_side)) return encoded_inputs def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An XLM-RoBERTa sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
""" if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. XLM-RoBERTa does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory.") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing a torch tensor using `//` is deprecated and has to be done via `torch.div` instead.
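To make the replacement described above concrete, here is a minimal sketch; the tensor values and block size are illustrative only, and `rounding_mode="floor"` is the usual way to preserve the old `//` behaviour.

import torch

seq_len = torch.tensor([7, 9, 12])
block_size = 4

# Deprecated pattern that triggers the __floordiv__ warning on integer tensors:
# num_blocks = seq_len // block_size
# Explicit replacement via torch.div, as the PR description suggests:
num_blocks = torch.div(seq_len, block_size, rounding_mode="floor")
print(num_blocks)  # tensor([1, 2, 3])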
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/fsmt/test_modeling_fsmt.py
# coding=utf-8 # Copyright 2020 Huggingface # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import timeout_decorator # noqa from parameterized import parameterized from transformers import FSMTConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import FSMTForConditionalGeneration, FSMTModel, FSMTTokenizer from transformers.models.fsmt.modeling_fsmt import ( SinusoidalPositionalEmbedding, _prepare_fsmt_decoder_inputs, invert_mask, shift_tokens_right, ) from transformers.pipelines import TranslationPipeline class FSMTModelTester: def __init__( self, parent, src_vocab_size=99, tgt_vocab_size=99, langs=["ru", "en"], batch_size=13, seq_length=7, is_training=False, use_labels=False, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="relu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, bos_token_id=0, pad_token_id=1, eos_token_id=2, ): self.parent = parent self.src_vocab_size = src_vocab_size self.tgt_vocab_size = tgt_vocab_size self.langs = langs self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.eos_token_id = eos_token_id torch.manual_seed(0) # hack needed for modeling_common tests - despite not really having this attribute in this model self.vocab_size = self.src_vocab_size def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.src_vocab_size).clamp( 3, ) input_ids[:, -1] = 2 # Eos Token config = self.get_config() inputs_dict = prepare_fsmt_inputs_dict(config, input_ids) return config, inputs_dict def get_config(self): return FSMTConfig( vocab_size=self.src_vocab_size, # hack needed for common tests src_vocab_size=self.src_vocab_size, tgt_vocab_size=self.tgt_vocab_size, langs=self.langs, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, 
attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() inputs_dict["decoder_input_ids"] = inputs_dict["input_ids"] inputs_dict["decoder_attention_mask"] = inputs_dict["attention_mask"] inputs_dict["use_cache"] = False return config, inputs_dict def prepare_fsmt_inputs_dict( config, input_ids, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { "input_ids": input_ids, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_torch class FSMTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (FSMTModel, FSMTForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (FSMTForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "conversational": FSMTForConditionalGeneration, "feature-extraction": FSMTModel, "summarization": FSMTForConditionalGeneration, "text2text-generation": FSMTForConditionalGeneration, } if is_torch_available() else {} ) is_encoder_decoder = True test_pruning = False test_missing_keys = False def setUp(self): self.model_tester = FSMTModelTester(self) self.langs = ["en", "ru"] config = { "langs": self.langs, "src_vocab_size": 10, "tgt_vocab_size": 20, } # XXX: hack to appease to all other models requiring `vocab_size` config["vocab_size"] = 99 # no such thing in FSMT self.config_tester = ConfigTester(self, config_class=FSMTConfig, **config) def test_config(self): self.config_tester.run_common_tests() # XXX: override test_model_common_attributes / different Embedding type def test_model_common_attributes(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Embedding)) model.set_input_embeddings(nn.Embedding(10, 10)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.modules.sparse.Embedding)) def test_initialization_more(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() model = FSMTModel(config) model.to(torch_device) model.eval() # test init # self.assertTrue((model.encoder.embed_tokens.weight == model.shared.weight).all().item()) def _check_var(module): """Check that we initialized various parameters from N(0, config.init_std).""" self.assertAlmostEqual(torch.std(module.weight).item(), config.init_std, 2) _check_var(model.encoder.embed_tokens) _check_var(model.encoder.layers[0].self_attn.k_proj) _check_var(model.encoder.layers[0].fc1) # XXX: different std for fairseq version of SinusoidalPositionalEmbedding # self.assertAlmostEqual(torch.std(model.encoder.embed_positions.weights).item(), config.init_std, 2) def test_advanced_inputs(self): 
config, inputs_dict = self.model_tester.prepare_config_and_inputs() config.use_cache = False inputs_dict["input_ids"][:, -2:] = config.pad_token_id decoder_input_ids, decoder_attn_mask, causal_mask = _prepare_fsmt_decoder_inputs( config, inputs_dict["input_ids"] ) model = FSMTModel(config).to(torch_device).eval() decoder_features_with_created_mask = model(**inputs_dict)[0] decoder_features_with_passed_mask = model( decoder_attention_mask=invert_mask(decoder_attn_mask), decoder_input_ids=decoder_input_ids, **inputs_dict )[0] _assert_tensors_equal(decoder_features_with_passed_mask, decoder_features_with_created_mask) useless_mask = torch.zeros_like(decoder_attn_mask) decoder_features = model(decoder_attention_mask=useless_mask, **inputs_dict)[0] self.assertTrue(isinstance(decoder_features, torch.Tensor)) # no hidden states or attentions self.assertEqual( decoder_features.size(), (self.model_tester.batch_size, self.model_tester.seq_length, config.tgt_vocab_size), ) if decoder_attn_mask.min().item() < -1e3: # some tokens were masked self.assertFalse((decoder_features_with_created_mask == decoder_features).all().item()) # Test different encoder attention masks decoder_features_with_long_encoder_mask = model( inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"].long() )[0] _assert_tensors_equal(decoder_features_with_long_encoder_mask, decoder_features_with_created_mask) def test_save_load_missing_keys(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) @unittest.skip("Test has a segmentation fault on torch 1.8.0") def test_export_to_onnx(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() model = FSMTModel(config).to(torch_device) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( model, (inputs_dict["input_ids"], inputs_dict["attention_mask"]), f"{tmpdirname}/fsmt_test.onnx", export_params=True, opset_version=12, input_names=["input_ids", "attention_mask"], ) @unittest.skip("can't be implemented for FSMT due to dual vocab.") def test_resize_tokens_embeddings(self): pass @unittest.skip("Passing inputs_embeds not implemented for FSMT.") def test_inputs_embeds(self): pass @unittest.skip("model weights aren't tied in FSMT.") def test_tie_model_weights(self): pass @unittest.skip("TODO: Decoder embeddings cannot be resized at the moment") def test_resize_embeddings_untied(self): pass @require_torch class FSMTHeadTests(unittest.TestCase): src_vocab_size = 99 tgt_vocab_size = 99 langs = ["ru", "en"] def _get_config(self): return FSMTConfig( src_vocab_size=self.src_vocab_size, tgt_vocab_size=self.tgt_vocab_size, langs=self.langs, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) def _get_config_and_data(self): input_ids = torch.tensor( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 
48, 2], [70, 70, 50, 9, 28, 0, 2], ], dtype=torch.long, device=torch_device, ) batch_size = input_ids.shape[0] config = self._get_config() return config, input_ids, batch_size def test_generate_beam_search(self): input_ids = torch.tensor([[71, 82, 2], [68, 34, 2]], dtype=torch.long, device=torch_device) config = self._get_config() lm_model = FSMTForConditionalGeneration(config).to(torch_device) lm_model.eval() max_length = 5 new_input_ids = lm_model.generate( input_ids.clone(), do_sample=True, num_return_sequences=1, num_beams=2, no_repeat_ngram_size=3, max_length=max_length, ) self.assertEqual(new_input_ids.shape, (input_ids.shape[0], max_length)) def test_shift_tokens_right(self): input_ids = torch.tensor([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=torch.long) shifted = shift_tokens_right(input_ids, 1) n_pad_before = input_ids.eq(1).float().sum() n_pad_after = shifted.eq(1).float().sum() self.assertEqual(shifted.shape, input_ids.shape) self.assertEqual(n_pad_after, n_pad_before - 1) self.assertTrue(torch.eq(shifted[:, 0], 2).all()) def test_generate_fp16(self): config, input_ids, batch_size = self._get_config_and_data() attention_mask = input_ids.ne(1).to(torch_device) model = FSMTForConditionalGeneration(config).eval().to(torch_device) if torch_device == "cuda": model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_dummy_inputs(self): config, *_ = self._get_config_and_data() model = FSMTForConditionalGeneration(config).eval().to(torch_device) model(**model.dummy_inputs) def test_prepare_fsmt_decoder_inputs(self): config, *_ = self._get_config_and_data() input_ids = _long_tensor(([4, 4, 2])) decoder_input_ids = _long_tensor([[26388, 2, config.pad_token_id]]) causal_mask_dtype = torch.float32 ignore = torch.finfo(causal_mask_dtype).min decoder_input_ids, decoder_attn_mask, causal_mask = _prepare_fsmt_decoder_inputs( config, input_ids, decoder_input_ids, causal_mask_dtype=causal_mask_dtype ) expected_causal_mask = torch.tensor( [[0, ignore, ignore], [0, 0, ignore], [0, 0, 0]] # never attend to the final token, because its pad ).to(input_ids.device) self.assertEqual(decoder_attn_mask.size(), decoder_input_ids.size()) self.assertTrue(torch.eq(expected_causal_mask, causal_mask).all()) def _assert_tensors_equal(a, b, atol=1e-12, prefix=""): """If tensors not close, or a and b arent both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: if len(prefix) > 0: prefix = f"{prefix}: " raise AssertionError(f"{prefix}{a} != {b}") def _long_tensor(tok_lst): return torch.tensor(tok_lst, dtype=torch.long, device=torch_device) TOLERANCE = 1e-4 pairs = [ ["en-ru"], ["ru-en"], ["en-de"], ["de-en"], ] @require_torch @require_sentencepiece @require_tokenizers class FSMTModelIntegrationTests(unittest.TestCase): tokenizers_cache = {} models_cache = {} default_mname = "facebook/wmt19-en-ru" @cached_property def default_tokenizer(self): return self.get_tokenizer(self.default_mname) @cached_property def default_model(self): return self.get_model(self.default_mname) def get_tokenizer(self, mname): if mname not in self.tokenizers_cache: self.tokenizers_cache[mname] = FSMTTokenizer.from_pretrained(mname) return self.tokenizers_cache[mname] def get_model(self, mname): if mname not in self.models_cache: self.models_cache[mname] = 
FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device) if torch_device == "cuda": self.models_cache[mname].half() return self.models_cache[mname] @slow def test_inference_no_head(self): tokenizer = self.default_tokenizer model = FSMTModel.from_pretrained(self.default_mname).to(torch_device) src_text = "My friend computer will translate this for me" input_ids = tokenizer([src_text], return_tensors="pt")["input_ids"] input_ids = _long_tensor(input_ids).to(torch_device) inputs_dict = prepare_fsmt_inputs_dict(model.config, input_ids) with torch.no_grad(): output = model(**inputs_dict)[0] expected_shape = torch.Size((1, 10, model.config.tgt_vocab_size)) self.assertEqual(output.shape, expected_shape) # expected numbers were generated when en-ru model, using just fairseq's model4.pt # may have to adjust if switched to a different checkpoint expected_slice = torch.tensor( [[-1.5753, -1.5753, 2.8975], [-0.9540, -0.9540, 1.0299], [-3.3131, -3.3131, 0.5219]] ).to(torch_device) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE)) def translation_setup(self, pair): text = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } src, tgt = pair.split("-") print(f"Testing {src} -> {tgt}") mname = f"facebook/wmt19-{pair}" src_text = text[src] tgt_text = text[tgt] tokenizer = self.get_tokenizer(mname) model = self.get_model(mname) return tokenizer, model, src_text, tgt_text @parameterized.expand(pairs) @slow def test_translation_direct(self, pair): tokenizer, model, src_text, tgt_text = self.translation_setup(pair) input_ids = tokenizer.encode(src_text, return_tensors="pt").to(torch_device) outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) assert decoded == tgt_text, f"\n\ngot: {decoded}\nexp: {tgt_text}\n" @parameterized.expand(pairs) @slow def test_translation_pipeline(self, pair): tokenizer, model, src_text, tgt_text = self.translation_setup(pair) device = 0 if torch_device == "cuda" else -1 pipeline = TranslationPipeline(model, tokenizer, framework="pt", device=device) output = pipeline([src_text]) self.assertEqual([tgt_text], [x["translation_text"] for x in output]) @require_torch class TestSinusoidalPositionalEmbeddings(unittest.TestCase): padding_idx = 1 tolerance = 1e-4 def test_basic(self): input_ids = torch.tensor([[4, 10]], dtype=torch.long, device=torch_device) emb1 = SinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6, padding_idx=self.padding_idx).to( torch_device ) emb = emb1(input_ids) desired_weights = torch.tensor( [ [9.0930e-01, 1.9999e-02, 2.0000e-04, -4.1615e-01, 9.9980e-01, 1.0000e00], [1.4112e-01, 2.9995e-02, 3.0000e-04, -9.8999e-01, 9.9955e-01, 1.0000e00], ] ).to(torch_device) self.assertTrue( torch.allclose(emb[0], desired_weights, atol=self.tolerance), msg=f"\nexp:\n{desired_weights}\ngot:\n{emb[0]}\n", ) def test_odd_embed_dim(self): # odd embedding_dim is allowed SinusoidalPositionalEmbedding(num_positions=4, embedding_dim=5, padding_idx=self.padding_idx).to(torch_device) # odd num_embeddings is allowed SinusoidalPositionalEmbedding(num_positions=5, embedding_dim=4, padding_idx=self.padding_idx).to(torch_device) @unittest.skip("different from marian (needs more research)") def test_positional_emb_weights_against_marian(self): desired_weights = torch.tensor( [ [0, 0, 0, 0, 0], [0.84147096, 0.82177866, 0.80180490, 0.78165019, 0.76140374], [0.90929741, 0.93651021, 0.95829457, 
0.97505713, 0.98720258], ] ) emb1 = SinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512, padding_idx=self.padding_idx).to( torch_device ) weights = emb1.weights.data[:3, :5] # XXX: only the 1st and 3rd lines match - this is testing against # verbatim copy of SinusoidalPositionalEmbedding from fairseq self.assertTrue( torch.allclose(weights, desired_weights, atol=self.tolerance), msg=f"\nexp:\n{desired_weights}\ngot:\n{weights}\n", ) # test that forward pass is just a lookup, there is no ignore padding logic input_ids = torch.tensor( [[4, 10, self.padding_idx, self.padding_idx, self.padding_idx]], dtype=torch.long, device=torch_device ) no_cache_pad_zero = emb1(input_ids)[0] # XXX: only the 1st line matches the 3rd self.assertTrue( torch.allclose(torch.tensor(desired_weights, device=torch_device), no_cache_pad_zero[:3, :5], atol=1e-3) )
# coding=utf-8 # Copyright 2020 Huggingface # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import timeout_decorator # noqa from parameterized import parameterized from transformers import FSMTConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from transformers.utils import cached_property from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import FSMTForConditionalGeneration, FSMTModel, FSMTTokenizer from transformers.models.fsmt.modeling_fsmt import ( SinusoidalPositionalEmbedding, _prepare_fsmt_decoder_inputs, invert_mask, shift_tokens_right, ) from transformers.pipelines import TranslationPipeline class FSMTModelTester: def __init__( self, parent, src_vocab_size=99, tgt_vocab_size=99, langs=["ru", "en"], batch_size=13, seq_length=7, is_training=False, use_labels=False, hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="relu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, bos_token_id=0, pad_token_id=1, eos_token_id=2, ): self.parent = parent self.src_vocab_size = src_vocab_size self.tgt_vocab_size = tgt_vocab_size self.langs = langs self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.eos_token_id = eos_token_id torch.manual_seed(0) # hack needed for modeling_common tests - despite not really having this attribute in this model self.vocab_size = self.src_vocab_size def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.src_vocab_size).clamp( 3, ) input_ids[:, -1] = 2 # Eos Token config = self.get_config() inputs_dict = prepare_fsmt_inputs_dict(config, input_ids) return config, inputs_dict def get_config(self): return FSMTConfig( vocab_size=self.src_vocab_size, # hack needed for common tests src_vocab_size=self.src_vocab_size, tgt_vocab_size=self.tgt_vocab_size, langs=self.langs, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, 
attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, ) def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() inputs_dict["decoder_input_ids"] = inputs_dict["input_ids"] inputs_dict["decoder_attention_mask"] = inputs_dict["attention_mask"] inputs_dict["use_cache"] = False return config, inputs_dict def prepare_fsmt_inputs_dict( config, input_ids, attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if head_mask is None: head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device) if decoder_head_mask is None: decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) if cross_attn_head_mask is None: cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device) return { "input_ids": input_ids, "attention_mask": attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_torch class FSMTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase): all_model_classes = (FSMTModel, FSMTForConditionalGeneration) if is_torch_available() else () all_generative_model_classes = (FSMTForConditionalGeneration,) if is_torch_available() else () pipeline_model_mapping = ( { "conversational": FSMTForConditionalGeneration, "feature-extraction": FSMTModel, "summarization": FSMTForConditionalGeneration, "text2text-generation": FSMTForConditionalGeneration, } if is_torch_available() else {} ) is_encoder_decoder = True test_pruning = False test_missing_keys = False def setUp(self): self.model_tester = FSMTModelTester(self) self.langs = ["en", "ru"] config = { "langs": self.langs, "src_vocab_size": 10, "tgt_vocab_size": 20, } # XXX: hack to appease to all other models requiring `vocab_size` config["vocab_size"] = 99 # no such thing in FSMT self.config_tester = ConfigTester(self, config_class=FSMTConfig, **config) def test_config(self): self.config_tester.run_common_tests() # XXX: override test_model_common_attributes / different Embedding type def test_model_common_attributes(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) self.assertIsInstance(model.get_input_embeddings(), (nn.Embedding)) model.set_input_embeddings(nn.Embedding(10, 10)) x = model.get_output_embeddings() self.assertTrue(x is None or isinstance(x, nn.modules.sparse.Embedding)) def test_initialization_more(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() model = FSMTModel(config) model.to(torch_device) model.eval() # test init # self.assertTrue((model.encoder.embed_tokens.weight == model.shared.weight).all().item()) def _check_var(module): """Check that we initialized various parameters from N(0, config.init_std).""" self.assertAlmostEqual(torch.std(module.weight).item(), config.init_std, 2) _check_var(model.encoder.embed_tokens) _check_var(model.encoder.layers[0].self_attn.k_proj) _check_var(model.encoder.layers[0].fc1) # XXX: different std for fairseq version of SinusoidalPositionalEmbedding # self.assertAlmostEqual(torch.std(model.encoder.embed_positions.weights).item(), config.init_std, 2) def test_advanced_inputs(self): 
config, inputs_dict = self.model_tester.prepare_config_and_inputs() config.use_cache = False inputs_dict["input_ids"][:, -2:] = config.pad_token_id decoder_input_ids, decoder_attn_mask, causal_mask = _prepare_fsmt_decoder_inputs( config, inputs_dict["input_ids"] ) model = FSMTModel(config).to(torch_device).eval() decoder_features_with_created_mask = model(**inputs_dict)[0] decoder_features_with_passed_mask = model( decoder_attention_mask=invert_mask(decoder_attn_mask), decoder_input_ids=decoder_input_ids, **inputs_dict )[0] _assert_tensors_equal(decoder_features_with_passed_mask, decoder_features_with_created_mask) useless_mask = torch.zeros_like(decoder_attn_mask) decoder_features = model(decoder_attention_mask=useless_mask, **inputs_dict)[0] self.assertTrue(isinstance(decoder_features, torch.Tensor)) # no hidden states or attentions self.assertEqual( decoder_features.size(), (self.model_tester.batch_size, self.model_tester.seq_length, config.tgt_vocab_size), ) if decoder_attn_mask.min().item() < -1e3: # some tokens were masked self.assertFalse((decoder_features_with_created_mask == decoder_features).all().item()) # Test different encoder attention masks decoder_features_with_long_encoder_mask = model( inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"].long() )[0] _assert_tensors_equal(decoder_features_with_long_encoder_mask, decoder_features_with_created_mask) def test_save_load_missing_keys(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) @unittest.skip("Test has a segmentation fault on torch 1.8.0") def test_export_to_onnx(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() model = FSMTModel(config).to(torch_device) with tempfile.TemporaryDirectory() as tmpdirname: torch.onnx.export( model, (inputs_dict["input_ids"], inputs_dict["attention_mask"]), f"{tmpdirname}/fsmt_test.onnx", export_params=True, opset_version=12, input_names=["input_ids", "attention_mask"], ) @unittest.skip("can't be implemented for FSMT due to dual vocab.") def test_resize_tokens_embeddings(self): pass @unittest.skip("Passing inputs_embeds not implemented for FSMT.") def test_inputs_embeds(self): pass @unittest.skip("model weights aren't tied in FSMT.") def test_tie_model_weights(self): pass @unittest.skip("TODO: Decoder embeddings cannot be resized at the moment") def test_resize_embeddings_untied(self): pass @require_torch class FSMTHeadTests(unittest.TestCase): src_vocab_size = 99 tgt_vocab_size = 99 langs = ["ru", "en"] def _get_config(self): return FSMTConfig( src_vocab_size=self.src_vocab_size, tgt_vocab_size=self.tgt_vocab_size, langs=self.langs, d_model=24, encoder_layers=2, decoder_layers=2, encoder_attention_heads=2, decoder_attention_heads=2, encoder_ffn_dim=32, decoder_ffn_dim=32, max_position_embeddings=48, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) def _get_config_and_data(self): input_ids = torch.tensor( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 37, 86, 59, 
48, 2], [70, 70, 50, 9, 28, 0, 2], ], dtype=torch.long, device=torch_device, ) batch_size = input_ids.shape[0] config = self._get_config() return config, input_ids, batch_size def test_generate_beam_search(self): input_ids = torch.tensor([[71, 82, 2], [68, 34, 2]], dtype=torch.long, device=torch_device) config = self._get_config() lm_model = FSMTForConditionalGeneration(config).to(torch_device) lm_model.eval() max_length = 5 new_input_ids = lm_model.generate( input_ids.clone(), do_sample=True, num_return_sequences=1, num_beams=2, no_repeat_ngram_size=3, max_length=max_length, ) self.assertEqual(new_input_ids.shape, (input_ids.shape[0], max_length)) def test_shift_tokens_right(self): input_ids = torch.tensor([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=torch.long) shifted = shift_tokens_right(input_ids, 1) n_pad_before = input_ids.eq(1).float().sum() n_pad_after = shifted.eq(1).float().sum() self.assertEqual(shifted.shape, input_ids.shape) self.assertEqual(n_pad_after, n_pad_before - 1) self.assertTrue(torch.eq(shifted[:, 0], 2).all()) def test_generate_fp16(self): config, input_ids, batch_size = self._get_config_and_data() attention_mask = input_ids.ne(1).to(torch_device) model = FSMTForConditionalGeneration(config).eval().to(torch_device) if torch_device == "cuda": model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def test_dummy_inputs(self): config, *_ = self._get_config_and_data() model = FSMTForConditionalGeneration(config).eval().to(torch_device) model(**model.dummy_inputs) def test_prepare_fsmt_decoder_inputs(self): config, *_ = self._get_config_and_data() input_ids = _long_tensor(([4, 4, 2])) decoder_input_ids = _long_tensor([[26388, 2, config.pad_token_id]]) causal_mask_dtype = torch.float32 ignore = torch.finfo(causal_mask_dtype).min decoder_input_ids, decoder_attn_mask, causal_mask = _prepare_fsmt_decoder_inputs( config, input_ids, decoder_input_ids, causal_mask_dtype=causal_mask_dtype ) expected_causal_mask = torch.tensor( [[0, ignore, ignore], [0, 0, ignore], [0, 0, 0]] # never attend to the final token, because its pad ).to(input_ids.device) self.assertEqual(decoder_attn_mask.size(), decoder_input_ids.size()) self.assertTrue(torch.eq(expected_causal_mask, causal_mask).all()) def _assert_tensors_equal(a, b, atol=1e-12, prefix=""): """If tensors not close, or a and b arent both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: if len(prefix) > 0: prefix = f"{prefix}: " raise AssertionError(f"{prefix}{a} != {b}") def _long_tensor(tok_lst): return torch.tensor(tok_lst, dtype=torch.long, device=torch_device) TOLERANCE = 1e-4 pairs = [ ["en-ru"], ["ru-en"], ["en-de"], ["de-en"], ] @require_torch @require_sentencepiece @require_tokenizers class FSMTModelIntegrationTests(unittest.TestCase): tokenizers_cache = {} models_cache = {} default_mname = "facebook/wmt19-en-ru" @cached_property def default_tokenizer(self): return self.get_tokenizer(self.default_mname) @cached_property def default_model(self): return self.get_model(self.default_mname) def get_tokenizer(self, mname): if mname not in self.tokenizers_cache: self.tokenizers_cache[mname] = FSMTTokenizer.from_pretrained(mname) return self.tokenizers_cache[mname] def get_model(self, mname): if mname not in self.models_cache: self.models_cache[mname] = 
FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device) if torch_device == "cuda": self.models_cache[mname].half() return self.models_cache[mname] @slow def test_inference_no_head(self): tokenizer = self.default_tokenizer model = FSMTModel.from_pretrained(self.default_mname).to(torch_device) src_text = "My friend computer will translate this for me" input_ids = tokenizer([src_text], return_tensors="pt")["input_ids"] input_ids = _long_tensor(input_ids).to(torch_device) inputs_dict = prepare_fsmt_inputs_dict(model.config, input_ids) with torch.no_grad(): output = model(**inputs_dict)[0] expected_shape = torch.Size((1, 10, model.config.tgt_vocab_size)) self.assertEqual(output.shape, expected_shape) # expected numbers were generated when en-ru model, using just fairseq's model4.pt # may have to adjust if switched to a different checkpoint expected_slice = torch.tensor( [[-1.5753, -1.5753, 2.8975], [-0.9540, -0.9540, 1.0299], [-3.3131, -3.3131, 0.5219]] ).to(torch_device) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE)) def translation_setup(self, pair): text = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } src, tgt = pair.split("-") print(f"Testing {src} -> {tgt}") mname = f"facebook/wmt19-{pair}" src_text = text[src] tgt_text = text[tgt] tokenizer = self.get_tokenizer(mname) model = self.get_model(mname) return tokenizer, model, src_text, tgt_text @parameterized.expand(pairs) @slow def test_translation_direct(self, pair): tokenizer, model, src_text, tgt_text = self.translation_setup(pair) input_ids = tokenizer.encode(src_text, return_tensors="pt").to(torch_device) outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) assert decoded == tgt_text, f"\n\ngot: {decoded}\nexp: {tgt_text}\n" @parameterized.expand(pairs) @slow def test_translation_pipeline(self, pair): tokenizer, model, src_text, tgt_text = self.translation_setup(pair) device = 0 if torch_device == "cuda" else -1 pipeline = TranslationPipeline(model, tokenizer, framework="pt", device=device) output = pipeline([src_text]) self.assertEqual([tgt_text], [x["translation_text"] for x in output]) @require_torch class TestSinusoidalPositionalEmbeddings(unittest.TestCase): padding_idx = 1 tolerance = 1e-4 def test_basic(self): input_ids = torch.tensor([[4, 10]], dtype=torch.long, device=torch_device) emb1 = SinusoidalPositionalEmbedding(num_positions=6, embedding_dim=6, padding_idx=self.padding_idx).to( torch_device ) emb = emb1(input_ids) desired_weights = torch.tensor( [ [9.0930e-01, 1.9999e-02, 2.0000e-04, -4.1615e-01, 9.9980e-01, 1.0000e00], [1.4112e-01, 2.9995e-02, 3.0000e-04, -9.8999e-01, 9.9955e-01, 1.0000e00], ] ).to(torch_device) self.assertTrue( torch.allclose(emb[0], desired_weights, atol=self.tolerance), msg=f"\nexp:\n{desired_weights}\ngot:\n{emb[0]}\n", ) def test_odd_embed_dim(self): # odd embedding_dim is allowed SinusoidalPositionalEmbedding(num_positions=4, embedding_dim=5, padding_idx=self.padding_idx).to(torch_device) # odd num_embeddings is allowed SinusoidalPositionalEmbedding(num_positions=5, embedding_dim=4, padding_idx=self.padding_idx).to(torch_device) @unittest.skip("different from marian (needs more research)") def test_positional_emb_weights_against_marian(self): desired_weights = torch.tensor( [ [0, 0, 0, 0, 0], [0.84147096, 0.82177866, 0.80180490, 0.78165019, 0.76140374], [0.90929741, 0.93651021, 0.95829457, 
0.97505713, 0.98720258], ] ) emb1 = SinusoidalPositionalEmbedding(num_positions=512, embedding_dim=512, padding_idx=self.padding_idx).to( torch_device ) weights = emb1.weights.data[:3, :5] # XXX: only the 1st and 3rd lines match - this is testing against # verbatim copy of SinusoidalPositionalEmbedding from fairseq self.assertTrue( torch.allclose(weights, desired_weights, atol=self.tolerance), msg=f"\nexp:\n{desired_weights}\ngot:\n{weights}\n", ) # test that forward pass is just a lookup, there is no ignore padding logic input_ids = torch.tensor( [[4, 10, self.padding_idx, self.padding_idx, self.padding_idx]], dtype=torch.long, device=torch_device ) no_cache_pad_zero = emb1(input_ids)[0] # XXX: only the 1st line matches the 3rd self.assertTrue( torch.allclose(torch.tensor(desired_weights, device=torch_device), no_cache_pad_zero[:3, :5], atol=1e-3) )
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/videomae/convert_videomae_to_pytorch.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert VideoMAE checkpoints from the original repository: https://github.com/MCG-NJU/VideoMAE""" import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEFeatureExtractor, VideoMAEForPreTraining, VideoMAEForVideoClassification, ) def get_videomae_config(model_name): config = VideoMAEConfig() if "large" in model_name: config.hidden_size = 1024 config.intermediate_size = 4096 config.num_hidden_layers = 24 config.num_attention_heads = 16 config.decoder_num_hidden_layers = 12 config.decoder_num_attention_heads = 8 config.decoder_hidden_size = 512 config.decoder_intermediate_size = 2048 if "finetuned" not in model_name: config.use_mean_pooling = False if "finetuned" in model_name: repo_id = "huggingface/label-files" if "kinetics" in model_name: config.num_labels = 400 filename = "kinetics400-id2label.json" elif "ssv2" in model_name: config.num_labels = 174 filename = "something-something-v2-id2label.json" else: raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.") id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} return config def rename_key(name): if "encoder." 
in name: name = name.replace("encoder.", "") if "cls_token" in name: name = name.replace("cls_token", "videomae.embeddings.cls_token") if "decoder_pos_embed" in name: name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed") if "pos_embed" in name and "decoder" not in name: name = name.replace("pos_embed", "videomae.embeddings.position_embeddings") if "patch_embed.proj" in name: name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection") if "patch_embed.norm" in name: name = name.replace("patch_embed.norm", "videomae.embeddings.norm") if "decoder.blocks" in name: name = name.replace("decoder.blocks", "decoder.decoder_layers") if "blocks" in name: name = name.replace("blocks", "videomae.encoder.layer") if "attn.proj" in name: name = name.replace("attn.proj", "attention.output.dense") if "attn" in name and "bias" not in name: name = name.replace("attn", "attention.self") if "attn" in name: name = name.replace("attn", "attention.attention") if "norm1" in name: name = name.replace("norm1", "layernorm_before") if "norm2" in name: name = name.replace("norm2", "layernorm_after") if "mlp.fc1" in name: name = name.replace("mlp.fc1", "intermediate.dense") if "mlp.fc2" in name: name = name.replace("mlp.fc2", "output.dense") if "decoder_embed" in name: name = name.replace("decoder_embed", "decoder.decoder_embed") if "decoder_norm" in name: name = name.replace("decoder_norm", "decoder.decoder_norm") if "decoder_pred" in name: name = name.replace("decoder_pred", "decoder.decoder_pred") if "norm.weight" in name and "decoder" not in name and "fc" not in name: name = name.replace("norm.weight", "videomae.layernorm.weight") if "norm.bias" in name and "decoder" not in name and "fc" not in name: name = name.replace("norm.bias", "videomae.layernorm.bias") if "head" in name and "decoder" not in name: name = name.replace("head", "classifier") return name def convert_state_dict(orig_state_dict, config): for key in orig_state_dict.copy().keys(): val = orig_state_dict.pop(key) if key.startswith("encoder."): key = key.replace("encoder.", "") if "qkv" in key: key_split = key.split(".") if key.startswith("decoder.blocks"): dim = config.decoder_hidden_size layer_num = int(key_split[2]) prefix = "decoder.decoder_layers." if "weight" in key: orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :] orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :] orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :] else: dim = config.hidden_size layer_num = int(key_split[1]) prefix = "videomae.encoder.layer." 
if "weight" in key: orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :] orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :] orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :] else: orig_state_dict[rename_key(key)] = val return orig_state_dict # We will verify our results on a video of eating spaghetti # Frame indices used: [164 168 172 176 181 185 189 193 198 202 206 210 215 219 223 227] def prepare_video(): file = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" ) video = np.load(file) return list(video) def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub): config = get_videomae_config(model_name) if "finetuned" in model_name: model = VideoMAEForVideoClassification(config) else: model = VideoMAEForPreTraining(config) # download original checkpoint, hosted on Google Drive output = "pytorch_model.bin" gdown.cached_download(checkpoint_url, output, quiet=False) files = torch.load(output, map_location="cpu") if "model" in files: state_dict = files["model"] else: state_dict = files["module"] new_state_dict = convert_state_dict(state_dict, config) model.load_state_dict(new_state_dict) model.eval() # verify model on basic input feature_extractor = VideoMAEFeatureExtractor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]) video = prepare_video() inputs = feature_extractor(video, return_tensors="pt") if "finetuned" not in model_name: local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt") inputs["bool_masked_pos"] = torch.load(local_path) outputs = model(**inputs) logits = outputs.logits model_names = [ # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) "videomae-base-short", "videomae-base-short-finetuned-kinetics", "videomae-base", "videomae-base-finetuned-kinetics", "videomae-large", "videomae-large-finetuned-kinetics", # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) "videomae-base-short-ssv2", "videomae-base-short-finetuned-ssv2", "videomae-base-ssv2", "videomae-base-finetuned-ssv2", ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-base": expected_shape = torch.Size([1, 1408, 1536]) expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]]) elif model_name == "videomae-base-short": expected_shape = torch.Size([1, 1408, 1536]) expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]]) # we verified the loss both for normalized and unnormalized targets for this one expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469]) elif model_name == "videomae-large": expected_shape = torch.Size([1, 1408, 1536]) expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]]) elif model_name == "videomae-large-finetuned-kinetics": expected_shape = torch.Size([1, 400]) expected_slice = torch.tensor([0.0771, 0.0011, -0.3625]) elif model_name == "videomae-base-short-finetuned-kinetics": expected_shape = torch.Size([1, 400]) expected_slice = torch.tensor([0.6588, 0.0990, -0.2493]) elif model_name == "videomae-base-finetuned-kinetics": expected_shape = torch.Size([1, 400]) expected_slice = 
torch.tensor([0.3669, -0.0688, -0.2421]) elif model_name == "videomae-base-short-ssv2": expected_shape = torch.Size([1, 1408, 1536]) expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]]) elif model_name == "videomae-base-short-finetuned-ssv2": expected_shape = torch.Size([1, 174]) expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266]) elif model_name == "videomae-base-ssv2": expected_shape = torch.Size([1, 1408, 1536]) expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]]) elif model_name == "videomae-base-finetuned-ssv2": expected_shape = torch.Size([1, 174]) expected_slice = torch.tensor([0.1961, -0.8337, -0.6389]) else: raise ValueError(f"Model name not supported. Should be one of {model_names}") # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4) else: print("Logits:", logits[0, :3, :3]) assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4) print("Logits ok!") # verify loss, if applicable if model_name == "videomae-base-short": loss = outputs.loss assert torch.allclose(loss, expected_loss, atol=1e-4) print("Loss ok!") if pytorch_dump_folder_path is not None: print(f"Saving model and feature extractor to {pytorch_dump_folder_path}") feature_extractor.save_pretrained(pytorch_dump_folder_path) model.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print("Pushing to the hub...") model.push_to_hub(model_name, organization="nielsr") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4", type=str, help=( "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct" " download link." ), ) parser.add_argument( "--pytorch_dump_folder_path", default="/Users/nielsrogge/Documents/VideoMAE/Test", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert VideoMAE checkpoints from the original repository: https://github.com/MCG-NJU/VideoMAE""" import argparse import json import gdown import numpy as np import torch from huggingface_hub import hf_hub_download from transformers import ( VideoMAEConfig, VideoMAEFeatureExtractor, VideoMAEForPreTraining, VideoMAEForVideoClassification, ) def get_videomae_config(model_name): config = VideoMAEConfig() if "large" in model_name: config.hidden_size = 1024 config.intermediate_size = 4096 config.num_hidden_layers = 24 config.num_attention_heads = 16 config.decoder_num_hidden_layers = 12 config.decoder_num_attention_heads = 8 config.decoder_hidden_size = 512 config.decoder_intermediate_size = 2048 if "finetuned" not in model_name: config.use_mean_pooling = False if "finetuned" in model_name: repo_id = "huggingface/label-files" if "kinetics" in model_name: config.num_labels = 400 filename = "kinetics400-id2label.json" elif "ssv2" in model_name: config.num_labels = 174 filename = "something-something-v2-id2label.json" else: raise ValueError("Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.") id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} return config def rename_key(name): if "encoder." 
in name: name = name.replace("encoder.", "") if "cls_token" in name: name = name.replace("cls_token", "videomae.embeddings.cls_token") if "decoder_pos_embed" in name: name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed") if "pos_embed" in name and "decoder" not in name: name = name.replace("pos_embed", "videomae.embeddings.position_embeddings") if "patch_embed.proj" in name: name = name.replace("patch_embed.proj", "videomae.embeddings.patch_embeddings.projection") if "patch_embed.norm" in name: name = name.replace("patch_embed.norm", "videomae.embeddings.norm") if "decoder.blocks" in name: name = name.replace("decoder.blocks", "decoder.decoder_layers") if "blocks" in name: name = name.replace("blocks", "videomae.encoder.layer") if "attn.proj" in name: name = name.replace("attn.proj", "attention.output.dense") if "attn" in name and "bias" not in name: name = name.replace("attn", "attention.self") if "attn" in name: name = name.replace("attn", "attention.attention") if "norm1" in name: name = name.replace("norm1", "layernorm_before") if "norm2" in name: name = name.replace("norm2", "layernorm_after") if "mlp.fc1" in name: name = name.replace("mlp.fc1", "intermediate.dense") if "mlp.fc2" in name: name = name.replace("mlp.fc2", "output.dense") if "decoder_embed" in name: name = name.replace("decoder_embed", "decoder.decoder_embed") if "decoder_norm" in name: name = name.replace("decoder_norm", "decoder.decoder_norm") if "decoder_pred" in name: name = name.replace("decoder_pred", "decoder.decoder_pred") if "norm.weight" in name and "decoder" not in name and "fc" not in name: name = name.replace("norm.weight", "videomae.layernorm.weight") if "norm.bias" in name and "decoder" not in name and "fc" not in name: name = name.replace("norm.bias", "videomae.layernorm.bias") if "head" in name and "decoder" not in name: name = name.replace("head", "classifier") return name def convert_state_dict(orig_state_dict, config): for key in orig_state_dict.copy().keys(): val = orig_state_dict.pop(key) if key.startswith("encoder."): key = key.replace("encoder.", "") if "qkv" in key: key_split = key.split(".") if key.startswith("decoder.blocks"): dim = config.decoder_hidden_size layer_num = int(key_split[2]) prefix = "decoder.decoder_layers." if "weight" in key: orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :] orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :] orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :] else: dim = config.hidden_size layer_num = int(key_split[1]) prefix = "videomae.encoder.layer." 
if "weight" in key: orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :] orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :] orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :] else: orig_state_dict[rename_key(key)] = val return orig_state_dict # We will verify our results on a video of eating spaghetti # Frame indices used: [164 168 172 176 181 185 189 193 198 202 206 210 215 219 223 227] def prepare_video(): file = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset" ) video = np.load(file) return list(video) def convert_videomae_checkpoint(checkpoint_url, pytorch_dump_folder_path, model_name, push_to_hub): config = get_videomae_config(model_name) if "finetuned" in model_name: model = VideoMAEForVideoClassification(config) else: model = VideoMAEForPreTraining(config) # download original checkpoint, hosted on Google Drive output = "pytorch_model.bin" gdown.cached_download(checkpoint_url, output, quiet=False) files = torch.load(output, map_location="cpu") if "model" in files: state_dict = files["model"] else: state_dict = files["module"] new_state_dict = convert_state_dict(state_dict, config) model.load_state_dict(new_state_dict) model.eval() # verify model on basic input feature_extractor = VideoMAEFeatureExtractor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]) video = prepare_video() inputs = feature_extractor(video, return_tensors="pt") if "finetuned" not in model_name: local_path = hf_hub_download(repo_id="hf-internal-testing/bool-masked-pos", filename="bool_masked_pos.pt") inputs["bool_masked_pos"] = torch.load(local_path) outputs = model(**inputs) logits = outputs.logits model_names = [ # Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600) "videomae-base-short", "videomae-base-short-finetuned-kinetics", "videomae-base", "videomae-base-finetuned-kinetics", "videomae-large", "videomae-large-finetuned-kinetics", # Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400) "videomae-base-short-ssv2", "videomae-base-short-finetuned-ssv2", "videomae-base-ssv2", "videomae-base-finetuned-ssv2", ] # NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5] if model_name == "videomae-base": expected_shape = torch.Size([1, 1408, 1536]) expected_slice = torch.tensor([[0.7739, 0.7968, 0.7089], [0.6701, 0.7487, 0.6209], [0.4287, 0.5158, 0.4773]]) elif model_name == "videomae-base-short": expected_shape = torch.Size([1, 1408, 1536]) expected_slice = torch.tensor([[0.7994, 0.9612, 0.8508], [0.7401, 0.8958, 0.8302], [0.5862, 0.7468, 0.7325]]) # we verified the loss both for normalized and unnormalized targets for this one expected_loss = torch.tensor([0.5142]) if config.norm_pix_loss else torch.tensor([0.6469]) elif model_name == "videomae-large": expected_shape = torch.Size([1, 1408, 1536]) expected_slice = torch.tensor([[0.7149, 0.7997, 0.6966], [0.6768, 0.7869, 0.6948], [0.5139, 0.6221, 0.5605]]) elif model_name == "videomae-large-finetuned-kinetics": expected_shape = torch.Size([1, 400]) expected_slice = torch.tensor([0.0771, 0.0011, -0.3625]) elif model_name == "videomae-base-short-finetuned-kinetics": expected_shape = torch.Size([1, 400]) expected_slice = torch.tensor([0.6588, 0.0990, -0.2493]) elif model_name == "videomae-base-finetuned-kinetics": expected_shape = torch.Size([1, 400]) expected_slice = 
torch.tensor([0.3669, -0.0688, -0.2421]) elif model_name == "videomae-base-short-ssv2": expected_shape = torch.Size([1, 1408, 1536]) expected_slice = torch.tensor([[0.4712, 0.5296, 0.5786], [0.2278, 0.2729, 0.4026], [0.0352, 0.0730, 0.2506]]) elif model_name == "videomae-base-short-finetuned-ssv2": expected_shape = torch.Size([1, 174]) expected_slice = torch.tensor([-0.0537, -0.1539, -0.3266]) elif model_name == "videomae-base-ssv2": expected_shape = torch.Size([1, 1408, 1536]) expected_slice = torch.tensor([[0.8131, 0.8727, 0.8546], [0.7366, 0.9377, 0.8870], [0.5935, 0.8874, 0.8564]]) elif model_name == "videomae-base-finetuned-ssv2": expected_shape = torch.Size([1, 174]) expected_slice = torch.tensor([0.1961, -0.8337, -0.6389]) else: raise ValueError(f"Model name not supported. Should be one of {model_names}") # verify logits assert logits.shape == expected_shape if "finetuned" in model_name: assert torch.allclose(logits[0, :3], expected_slice, atol=1e-4) else: print("Logits:", logits[0, :3, :3]) assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4) print("Logits ok!") # verify loss, if applicable if model_name == "videomae-base-short": loss = outputs.loss assert torch.allclose(loss, expected_loss, atol=1e-4) print("Loss ok!") if pytorch_dump_folder_path is not None: print(f"Saving model and feature extractor to {pytorch_dump_folder_path}") feature_extractor.save_pretrained(pytorch_dump_folder_path) model.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print("Pushing to the hub...") model.push_to_hub(model_name, organization="nielsr") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--checkpoint_url", default="https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&amp;export=download&amp;confirm=t&amp;uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4", type=str, help=( "URL of the original PyTorch checkpoint (on Google Drive) you'd like to convert. Should be a direct" " download link." ), ) parser.add_argument( "--pytorch_dump_folder_path", default="/Users/nielsrogge/Documents/VideoMAE/Test", type=str, help="Path to the output PyTorch model directory.", ) parser.add_argument("--model_name", default="videomae-base", type=str, help="Name of the model.") parser.add_argument( "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub." ) args = parser.parse_args() convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/time_series_transformer/__init__.py
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div` instead.
./src/transformers/models/big_bird/tokenization_big_bird.py
# coding=utf-8 # Copyright 2021 Google Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for BigBird.""" import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model", "google/bigbird-roberta-large": ( "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model" ), "google/bigbird-base-trivia-itc": ( "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model" ), } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "google/bigbird-roberta-base": 4096, "google/bigbird-roberta-large": 4096, "google/bigbird-base-trivia-itc": 4096, } class BigBirdTokenizer(PreTrainedTokenizer): """ Construct a BigBird tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. bos_token (`str`, *optional*, defaults to `"<s>"`): The begin of sequence token. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. 
The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] prefix_tokens: List[int] = [] def __init__( self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None: bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, mask_token=mask_token, cls_token=cls_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, ) self.vocab_file = vocab_file self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(vocab_file) @property def vocab_size(self): return self.sp_model.get_piece_size() def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d): self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def _tokenize(self, text: str) -> List[str]: """Take as input a string and return a list of strings (tokens) for words/sub-words""" return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.sp_model.piece_to_id(token) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" token = self.sp_model.IdToPiece(index) return token def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" current_sub_tokens = [] out_string = "" prev_is_special = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(current_sub_tokens) + token prev_is_special = True current_sub_tokens = [] else: current_sub_tokens.append(token) prev_is_special = False out_string += self.sp_model.decode(current_sub_tokens) return out_string.strip() def _decode( self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = True, spaces_between_special_tokens: bool = True, **kwargs, ) -> str: self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False) filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 sub_texts = [] current_sub_text = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(current_sub_text)) current_sub_text = [] sub_texts.append(token) else: current_sub_text.append(token) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(current_sub_text)) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts)) else: text = "".join(sub_texts) if clean_up_tokenization_spaces: clean_text = self.clean_up_tokenization(text) return clean_text else: return text def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A Big Bird sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format: :: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
# coding=utf-8 # Copyright 2021 Google Research and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for BigBird.""" import os import re from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model", "google/bigbird-roberta-large": ( "https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model" ), "google/bigbird-base-trivia-itc": ( "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model" ), } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "google/bigbird-roberta-base": 4096, "google/bigbird-roberta-large": 4096, "google/bigbird-base-trivia-itc": 4096, } class BigBirdTokenizer(PreTrainedTokenizer): """ Construct a BigBird tokenizer. Based on [SentencePiece](https://github.com/google/sentencepiece). This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): [SentencePiece](https://github.com/google/sentencepiece) file (generally has a *.spm* extension) that contains the vocabulary necessary to instantiate a tokenizer. eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. bos_token (`str`, *optional*, defaults to `"<s>"`): The begin of sequence token. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. sep_token (`str`, *optional*, defaults to `"[SEP]"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"[CLS]"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. mask_token (`str`, *optional*, defaults to `"[MASK]"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. sp_model_kwargs (`dict`, *optional*): Will be passed to the `SentencePieceProcessor.__init__()` method. 
The [Python wrapper for SentencePiece](https://github.com/google/sentencepiece/tree/master/python) can be used, among other things, to set: - `enable_sampling`: Enable subword regularization. - `nbest_size`: Sampling parameters for unigram. Invalid for BPE-Dropout. - `nbest_size = {0,1}`: No sampling is performed. - `nbest_size > 1`: samples from the nbest_size results. - `nbest_size < 0`: assuming that nbest_size is infinite and samples from the all hypothesis (lattice) using forward-filtering-and-backward-sampling algorithm. - `alpha`: Smoothing parameter for unigram sampling, and dropout probability of merge operations for BPE-dropout. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] prefix_tokens: List[int] = [] def __init__( self, vocab_file, unk_token="<unk>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", sep_token="[SEP]", mask_token="[MASK]", cls_token="[CLS]", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs, ) -> None: bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token # Mask token behave like a normal word, i.e. 
include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, mask_token=mask_token, cls_token=cls_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, ) self.vocab_file = vocab_file self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(vocab_file) @property def vocab_size(self): return self.sp_model.get_piece_size() def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d): self.__dict__ = d # for backward compatibility if not hasattr(self, "sp_model_kwargs"): self.sp_model_kwargs = {} self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def _tokenize(self, text: str) -> List[str]: """Take as input a string and return a list of strings (tokens) for words/sub-words""" return self.sp_model.encode(text, out_type=str) def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.sp_model.piece_to_id(token) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" token = self.sp_model.IdToPiece(index) return token def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" current_sub_tokens = [] out_string = "" prev_is_special = False for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: if not prev_is_special: out_string += " " out_string += self.sp_model.decode(current_sub_tokens) + token prev_is_special = True current_sub_tokens = [] else: current_sub_tokens.append(token) prev_is_special = False out_string += self.sp_model.decode(current_sub_tokens) return out_string.strip() def _decode( self, token_ids: List[int], skip_special_tokens: bool = False, clean_up_tokenization_spaces: bool = True, spaces_between_special_tokens: bool = True, **kwargs, ) -> str: self._decode_use_source_tokenizer = kwargs.pop("use_source_tokenizer", False) filtered_tokens = self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens) # To avoid mixing byte-level and unicode for byte-level BPT # we need to build string separately for added tokens and byte-level tokens # cf. 
https://github.com/huggingface/transformers/issues/1133 sub_texts = [] current_sub_text = [] for token in filtered_tokens: if skip_special_tokens and token in self.all_special_ids: continue if token in self.added_tokens_encoder: if current_sub_text: sub_texts.append(self.convert_tokens_to_string(current_sub_text)) current_sub_text = [] sub_texts.append(token) else: current_sub_text.append(token) if current_sub_text: sub_texts.append(self.convert_tokens_to_string(current_sub_text)) # Mimic the behavior of the Rust tokenizer: # No space before [MASK] and [SEP] if spaces_between_special_tokens: text = re.sub(r" (\[(MASK|SEP)\])", r"\1", " ".join(sub_texts)) else: text = "".join(sub_texts) if clean_up_tokenization_spaces: clean_text = self.clean_up_tokenization(text) return clean_text else: return text def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file): copyfile(self.vocab_file, out_vocab_file) elif not os.path.isfile(self.vocab_file): with open(out_vocab_file, "wb") as fi: content_spiece_model = self.sp_model.serialized_model_proto() fi.write(content_spiece_model) return (out_vocab_file,) def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A Big Bird sequence has the following format: - single sequence: `[CLS] X [SEP]` - pair of sequences: `[CLS] A [SEP] B [SEP]` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. """ if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + token_ids_1 + sep def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. 
""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1] def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence pair mask has the following format: :: 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 | first sequence | second sequence | If `token_ids_1` is `None`, this method only returns the first portion of the mask (0s). Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s). """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
-1
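The tokenizer file in the record above documents the `[CLS] A [SEP] B [SEP]` layout for sequence pairs and the matching token type ids. Below is a small self-contained sketch of that logic, using placeholder special-token ids rather than ids taken from a real SentencePiece vocabulary:

```python
from typing import List, Optional

CLS_ID, SEP_ID = 65, 66  # placeholder ids; real values come from the SentencePiece vocab


def build_inputs_with_special_tokens(ids_0: List[int], ids_1: Optional[List[int]] = None) -> List[int]:
    # Single sequence: [CLS] X [SEP]; pair of sequences: [CLS] A [SEP] B [SEP]
    if ids_1 is None:
        return [CLS_ID] + ids_0 + [SEP_ID]
    return [CLS_ID] + ids_0 + [SEP_ID] + ids_1 + [SEP_ID]


def create_token_type_ids(ids_0: List[int], ids_1: Optional[List[int]] = None) -> List[int]:
    # 0 for the first sequence (including [CLS] and its [SEP]), 1 for the second
    if ids_1 is None:
        return [0] * (len(ids_0) + 2)
    return [0] * (len(ids_0) + 2) + [1] * (len(ids_1) + 1)


print(build_inputs_with_special_tokens([10, 11], [20]))  # [65, 10, 11, 66, 20, 66]
print(create_token_type_ids([10, 11], [20]))             # [0, 0, 0, 0, 1, 1]
```

With a real checkpoint, the same results come from the tokenizer's own `build_inputs_with_special_tokens` and `create_token_type_ids_from_sequences` methods shown in the file.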
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div` instead.
./examples/pytorch/semantic-segmentation/run_semantic_segmentation.py
#!/usr/bin/env python # coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and import json import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import evaluate import numpy as np import torch from datasets import load_dataset from huggingface_hub import hf_hub_download from PIL import Image from torch import nn from torchvision import transforms from torchvision.transforms import functional import transformers from transformers import ( AutoConfig, AutoImageProcessor, AutoModelForSemanticSegmentation, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version """ Finetuning any 🤗 Transformers model supported by AutoModelForSemanticSegmentation for semantic segmentation leveraging the Trainer API.""" logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.27.0.dev0") require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/semantic-segmentation/requirements.txt") def pad_if_smaller(img, size, fill=0): size = (size, size) if isinstance(size, int) else size original_width, original_height = img.size pad_height = size[1] - original_height if original_height < size[1] else 0 pad_width = size[0] - original_width if original_width < size[0] else 0 img = functional.pad(img, (0, 0, pad_width, pad_height), fill=fill) return img class Compose: def __init__(self, transforms): self.transforms = transforms def __call__(self, image, target): for t in self.transforms: image, target = t(image, target) return image, target class Identity: def __init__(self): pass def __call__(self, image, target): return image, target class Resize: def __init__(self, size): self.size = size def __call__(self, image, target): image = functional.resize(image, self.size) target = functional.resize(target, self.size, interpolation=transforms.InterpolationMode.NEAREST) return image, target class RandomResize: def __init__(self, min_size, max_size=None): self.min_size = min_size if max_size is None: max_size = min_size self.max_size = max_size def __call__(self, image, target): size = random.randint(self.min_size, self.max_size) image = functional.resize(image, size) target = functional.resize(target, size, interpolation=transforms.InterpolationMode.NEAREST) return image, target class RandomCrop: def __init__(self, size): self.size = size if isinstance(size, tuple) else (size, size) def __call__(self, image, target): image = pad_if_smaller(image, self.size) target = pad_if_smaller(target, self.size, fill=255) crop_params = transforms.RandomCrop.get_params(image, self.size) image = functional.crop(image, *crop_params) target = functional.crop(target, *crop_params) return image, target class RandomHorizontalFlip: def __init__(self, 
flip_prob): self.flip_prob = flip_prob def __call__(self, image, target): if random.random() < self.flip_prob: image = functional.hflip(image) target = functional.hflip(target) return image, target class PILToTensor: def __call__(self, image, target): image = functional.pil_to_tensor(image) target = torch.as_tensor(np.array(target), dtype=torch.int64) return image, target class ConvertImageDtype: def __init__(self, dtype): self.dtype = dtype def __call__(self, image, target): image = functional.convert_image_dtype(image, self.dtype) return image, target class Normalize: def __init__(self, mean, std): self.mean = mean self.std = std def __call__(self, image, target): image = functional.normalize(image, mean=self.mean, std=self.std) return image, target class ReduceLabels: def __call__(self, image, target): if not isinstance(target, np.ndarray): target = np.array(target).astype(np.uint8) # avoid using underflow conversion target[target == 0] = 255 target = target - 1 target[target == 254] = 255 target = Image.fromarray(target) return image, target @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ dataset_name: Optional[str] = field( default="segments/sidewalk-semantic", metadata={ "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)." }, ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_val_split: Optional[float] = field( default=0.15, metadata={"help": "Percent to split off of train for validation."} ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) reduce_labels: Optional[bool] = field( default=False, metadata={"help": "Whether or not to reduce all labels by 1 and replace background by 255."}, ) def __post_init__(self): if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None): raise ValueError( "You must specify either a dataset name from the hub or a train and/or validation directory." ) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. 
""" model_name_or_path: str = field( default="nvidia/mit-b0", metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."}) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_semantic_segmentation", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." 
) # Load dataset # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # TODO support datasets from local folders dataset = load_dataset(data_args.dataset_name, cache_dir=model_args.cache_dir) # Rename column names to standardized names (only "image" and "label" need to be present) if "pixel_values" in dataset["train"].column_names: dataset = dataset.rename_columns({"pixel_values": "image"}) if "annotation" in dataset["train"].column_names: dataset = dataset.rename_columns({"annotation": "label"}) # If we don't have a validation split, split off a percentage of train as validation. data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0: split = dataset["train"].train_test_split(data_args.train_val_split) dataset["train"] = split["train"] dataset["validation"] = split["test"] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. if data_args.dataset_name == "scene_parse_150": repo_id = "huggingface/label-files" filename = "ade20k-id2label.json" else: repo_id = data_args.dataset_name filename = "id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: str(k) for k, v in id2label.items()} # Load the mean IoU metric from the datasets package metric = evaluate.load("mean_iou") # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. @torch.no_grad() def compute_metrics(eval_pred): logits, labels = eval_pred logits_tensor = torch.from_numpy(logits) # scale the logits to the size of the label logits_tensor = nn.functional.interpolate( logits_tensor, size=labels.shape[-2:], mode="bilinear", align_corners=False, ).argmax(dim=1) pred_labels = logits_tensor.detach().cpu().numpy() metrics = metric.compute( predictions=pred_labels, references=labels, num_labels=len(id2label), ignore_index=0, reduce_labels=image_processor.do_reduce_labels, ) # add per category metrics as individual key-value pairs per_category_accuracy = metrics.pop("per_category_accuracy").tolist() per_category_iou = metrics.pop("per_category_iou").tolist() metrics.update({f"accuracy_{id2label[i]}": v for i, v in enumerate(per_category_accuracy)}) metrics.update({f"iou_{id2label[i]}": v for i, v in enumerate(per_category_iou)}) return metrics config = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path, label2id=label2id, id2label=id2label, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForSemanticSegmentation.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) image_processor = AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # Define torchvision transforms to be applied to each image + target. 
# Not that straightforward in torchvision: https://github.com/pytorch/vision/issues/9 # Currently based on official torchvision references: https://github.com/pytorch/vision/blob/main/references/segmentation/transforms.py if "shortest_edge" in image_processor.size: # We instead set the target size as (shortest_edge, shortest_edge) to here to ensure all images are batchable. size = (image_processor.size["shortest_edge"], image_processor.size["shortest_edge"]) else: size = (image_processor.size["height"], image_processor.size["width"]) train_transforms = Compose( [ ReduceLabels() if data_args.reduce_labels else Identity(), RandomCrop(size=size), RandomHorizontalFlip(flip_prob=0.5), PILToTensor(), ConvertImageDtype(torch.float), Normalize(mean=image_processor.image_mean, std=image_processor.image_std), ] ) # Define torchvision transform to be applied to each image. # jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1) val_transforms = Compose( [ ReduceLabels() if data_args.reduce_labels else Identity(), Resize(size=size), PILToTensor(), ConvertImageDtype(torch.float), Normalize(mean=image_processor.image_mean, std=image_processor.image_std), ] ) def preprocess_train(example_batch): pixel_values = [] labels = [] for image, target in zip(example_batch["image"], example_batch["label"]): image, target = train_transforms(image.convert("RGB"), target) pixel_values.append(image) labels.append(target) encoding = {} encoding["pixel_values"] = torch.stack(pixel_values) encoding["labels"] = torch.stack(labels) return encoding def preprocess_val(example_batch): pixel_values = [] labels = [] for image, target in zip(example_batch["image"], example_batch["label"]): image, target = val_transforms(image.convert("RGB"), target) pixel_values.append(image) labels.append(target) encoding = {} encoding["pixel_values"] = torch.stack(pixel_values) encoding["labels"] = torch.stack(labels) return encoding if training_args.do_train: if "train" not in dataset: raise ValueError("--do_train requires a train dataset") if data_args.max_train_samples is not None: dataset["train"] = ( dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples)) ) # Set the training transforms dataset["train"].set_transform(preprocess_train) if training_args.do_eval: if "validation" not in dataset: raise ValueError("--do_eval requires a validation dataset") if data_args.max_eval_samples is not None: dataset["validation"] = ( dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples)) ) # Set the validation transforms dataset["validation"].set_transform(preprocess_val) # Initalize our trainer trainer = Trainer( model=model, args=training_args, train_dataset=dataset["train"] if training_args.do_train else None, eval_dataset=dataset["validation"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=image_processor, data_collator=default_data_collator, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() trainer.log_metrics("train", train_result.metrics) trainer.save_metrics("train", train_result.metrics) trainer.save_state() # Evaluation if training_args.do_eval: metrics = trainer.evaluate() trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Write 
model card and (optionally) push to hub kwargs = { "finetuned_from": model_args.model_name_or_path, "dataset": data_args.dataset_name, "tags": ["image-segmentation", "vision"], } if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) if __name__ == "__main__": main()
#!/usr/bin/env python # coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and import json import logging import os import random import sys from dataclasses import dataclass, field from typing import Optional import evaluate import numpy as np import torch from datasets import load_dataset from huggingface_hub import hf_hub_download from PIL import Image from torch import nn from torchvision import transforms from torchvision.transforms import functional import transformers from transformers import ( AutoConfig, AutoImageProcessor, AutoModelForSemanticSegmentation, HfArgumentParser, Trainer, TrainingArguments, default_data_collator, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version """ Finetuning any 🤗 Transformers model supported by AutoModelForSemanticSegmentation for semantic segmentation leveraging the Trainer API.""" logger = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.27.0.dev0") require_version("datasets>=2.0.0", "To fix: pip install -r examples/pytorch/semantic-segmentation/requirements.txt") def pad_if_smaller(img, size, fill=0): size = (size, size) if isinstance(size, int) else size original_width, original_height = img.size pad_height = size[1] - original_height if original_height < size[1] else 0 pad_width = size[0] - original_width if original_width < size[0] else 0 img = functional.pad(img, (0, 0, pad_width, pad_height), fill=fill) return img class Compose: def __init__(self, transforms): self.transforms = transforms def __call__(self, image, target): for t in self.transforms: image, target = t(image, target) return image, target class Identity: def __init__(self): pass def __call__(self, image, target): return image, target class Resize: def __init__(self, size): self.size = size def __call__(self, image, target): image = functional.resize(image, self.size) target = functional.resize(target, self.size, interpolation=transforms.InterpolationMode.NEAREST) return image, target class RandomResize: def __init__(self, min_size, max_size=None): self.min_size = min_size if max_size is None: max_size = min_size self.max_size = max_size def __call__(self, image, target): size = random.randint(self.min_size, self.max_size) image = functional.resize(image, size) target = functional.resize(target, size, interpolation=transforms.InterpolationMode.NEAREST) return image, target class RandomCrop: def __init__(self, size): self.size = size if isinstance(size, tuple) else (size, size) def __call__(self, image, target): image = pad_if_smaller(image, self.size) target = pad_if_smaller(target, self.size, fill=255) crop_params = transforms.RandomCrop.get_params(image, self.size) image = functional.crop(image, *crop_params) target = functional.crop(target, *crop_params) return image, target class RandomHorizontalFlip: def __init__(self, 
flip_prob): self.flip_prob = flip_prob def __call__(self, image, target): if random.random() < self.flip_prob: image = functional.hflip(image) target = functional.hflip(target) return image, target class PILToTensor: def __call__(self, image, target): image = functional.pil_to_tensor(image) target = torch.as_tensor(np.array(target), dtype=torch.int64) return image, target class ConvertImageDtype: def __init__(self, dtype): self.dtype = dtype def __call__(self, image, target): image = functional.convert_image_dtype(image, self.dtype) return image, target class Normalize: def __init__(self, mean, std): self.mean = mean self.std = std def __call__(self, image, target): image = functional.normalize(image, mean=self.mean, std=self.std) return image, target class ReduceLabels: def __call__(self, image, target): if not isinstance(target, np.ndarray): target = np.array(target).astype(np.uint8) # avoid using underflow conversion target[target == 0] = 255 target = target - 1 target[target == 254] = 255 target = Image.fromarray(target) return image, target @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ dataset_name: Optional[str] = field( default="segments/sidewalk-semantic", metadata={ "help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)." }, ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_val_split: Optional[float] = field( default=0.15, metadata={"help": "Percent to split off of train for validation."} ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) }, ) max_eval_samples: Optional[int] = field( default=None, metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." ) }, ) reduce_labels: Optional[bool] = field( default=False, metadata={"help": "Whether or not to reduce all labels by 1 and replace background by 255."}, ) def __post_init__(self): if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None): raise ValueError( "You must specify either a dataset name from the hub or a train and/or validation directory." ) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. 
""" model_name_or_path: str = field( default="nvidia/mit-b0", metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}, ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) image_processor_name: str = field(default=None, metadata={"help": "Name or path of preprocessor config."}) use_auth_token: bool = field( default=False, metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) }, ) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("run_semantic_segmentation", model_args, data_args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. transformers.utils.logging.set_verbosity_info() log_level = training_args.get_process_log_level() logger.setLevel(log_level) transformers.utils.logging.set_verbosity(log_level) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) logger.info(f"Training/evaluation parameters {training_args}") # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." 
) # Load dataset # In distributed training, the load_dataset function guarantees that only one local process can concurrently # download the dataset. # TODO support datasets from local folders dataset = load_dataset(data_args.dataset_name, cache_dir=model_args.cache_dir) # Rename column names to standardized names (only "image" and "label" need to be present) if "pixel_values" in dataset["train"].column_names: dataset = dataset.rename_columns({"pixel_values": "image"}) if "annotation" in dataset["train"].column_names: dataset = dataset.rename_columns({"annotation": "label"}) # If we don't have a validation split, split off a percentage of train as validation. data_args.train_val_split = None if "validation" in dataset.keys() else data_args.train_val_split if isinstance(data_args.train_val_split, float) and data_args.train_val_split > 0.0: split = dataset["train"].train_test_split(data_args.train_val_split) dataset["train"] = split["train"] dataset["validation"] = split["test"] # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. if data_args.dataset_name == "scene_parse_150": repo_id = "huggingface/label-files" filename = "ade20k-id2label.json" else: repo_id = data_args.dataset_name filename = "id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} label2id = {v: str(k) for k, v in id2label.items()} # Load the mean IoU metric from the datasets package metric = evaluate.load("mean_iou") # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a # predictions and label_ids field) and has to return a dictionary string to float. @torch.no_grad() def compute_metrics(eval_pred): logits, labels = eval_pred logits_tensor = torch.from_numpy(logits) # scale the logits to the size of the label logits_tensor = nn.functional.interpolate( logits_tensor, size=labels.shape[-2:], mode="bilinear", align_corners=False, ).argmax(dim=1) pred_labels = logits_tensor.detach().cpu().numpy() metrics = metric.compute( predictions=pred_labels, references=labels, num_labels=len(id2label), ignore_index=0, reduce_labels=image_processor.do_reduce_labels, ) # add per category metrics as individual key-value pairs per_category_accuracy = metrics.pop("per_category_accuracy").tolist() per_category_iou = metrics.pop("per_category_iou").tolist() metrics.update({f"accuracy_{id2label[i]}": v for i, v in enumerate(per_category_accuracy)}) metrics.update({f"iou_{id2label[i]}": v for i, v in enumerate(per_category_iou)}) return metrics config = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path, label2id=label2id, id2label=id2label, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForSemanticSegmentation.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) image_processor = AutoImageProcessor.from_pretrained( model_args.image_processor_name or model_args.model_name_or_path, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # Define torchvision transforms to be applied to each image + target. 
# Not that straightforward in torchvision: https://github.com/pytorch/vision/issues/9 # Currently based on official torchvision references: https://github.com/pytorch/vision/blob/main/references/segmentation/transforms.py if "shortest_edge" in image_processor.size: # We instead set the target size as (shortest_edge, shortest_edge) to here to ensure all images are batchable. size = (image_processor.size["shortest_edge"], image_processor.size["shortest_edge"]) else: size = (image_processor.size["height"], image_processor.size["width"]) train_transforms = Compose( [ ReduceLabels() if data_args.reduce_labels else Identity(), RandomCrop(size=size), RandomHorizontalFlip(flip_prob=0.5), PILToTensor(), ConvertImageDtype(torch.float), Normalize(mean=image_processor.image_mean, std=image_processor.image_std), ] ) # Define torchvision transform to be applied to each image. # jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1) val_transforms = Compose( [ ReduceLabels() if data_args.reduce_labels else Identity(), Resize(size=size), PILToTensor(), ConvertImageDtype(torch.float), Normalize(mean=image_processor.image_mean, std=image_processor.image_std), ] ) def preprocess_train(example_batch): pixel_values = [] labels = [] for image, target in zip(example_batch["image"], example_batch["label"]): image, target = train_transforms(image.convert("RGB"), target) pixel_values.append(image) labels.append(target) encoding = {} encoding["pixel_values"] = torch.stack(pixel_values) encoding["labels"] = torch.stack(labels) return encoding def preprocess_val(example_batch): pixel_values = [] labels = [] for image, target in zip(example_batch["image"], example_batch["label"]): image, target = val_transforms(image.convert("RGB"), target) pixel_values.append(image) labels.append(target) encoding = {} encoding["pixel_values"] = torch.stack(pixel_values) encoding["labels"] = torch.stack(labels) return encoding if training_args.do_train: if "train" not in dataset: raise ValueError("--do_train requires a train dataset") if data_args.max_train_samples is not None: dataset["train"] = ( dataset["train"].shuffle(seed=training_args.seed).select(range(data_args.max_train_samples)) ) # Set the training transforms dataset["train"].set_transform(preprocess_train) if training_args.do_eval: if "validation" not in dataset: raise ValueError("--do_eval requires a validation dataset") if data_args.max_eval_samples is not None: dataset["validation"] = ( dataset["validation"].shuffle(seed=training_args.seed).select(range(data_args.max_eval_samples)) ) # Set the validation transforms dataset["validation"].set_transform(preprocess_val) # Initalize our trainer trainer = Trainer( model=model, args=training_args, train_dataset=dataset["train"] if training_args.do_train else None, eval_dataset=dataset["validation"] if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=image_processor, data_collator=default_data_collator, ) # Training if training_args.do_train: checkpoint = None if training_args.resume_from_checkpoint is not None: checkpoint = training_args.resume_from_checkpoint elif last_checkpoint is not None: checkpoint = last_checkpoint train_result = trainer.train(resume_from_checkpoint=checkpoint) trainer.save_model() trainer.log_metrics("train", train_result.metrics) trainer.save_metrics("train", train_result.metrics) trainer.save_state() # Evaluation if training_args.do_eval: metrics = trainer.evaluate() trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Write 
model card and (optionally) push to hub kwargs = { "finetuned_from": model_args.model_name_or_path, "dataset": data_args.dataset_name, "tags": ["image-segmentation", "vision"], } if training_args.push_to_hub: trainer.push_to_hub(**kwargs) else: trainer.create_model_card(**kwargs) if __name__ == "__main__": main()
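The compute_metrics helper in the script above rescales the model's low-resolution logits to the label resolution before taking the per-pixel argmax. A minimal, self-contained sketch of that step, using made-up shapes (2 images, 150 classes, logits at 128x128, labels at 512x512):

```python
import torch
import torch.nn as nn

logits = torch.randn(2, 150, 128, 128)        # raw model output (made-up shape)
labels = torch.randint(0, 150, (2, 512, 512))  # ground-truth segmentation maps

# Upsample logits to the label resolution, then take the per-pixel argmax.
pred_labels = nn.functional.interpolate(
    logits, size=labels.shape[-2:], mode="bilinear", align_corners=False
).argmax(dim=1)

print(pred_labels.shape)  # torch.Size([2, 512, 512]) -- same spatial size as the labels
```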
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
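A minimal sketch of the replacement the PR describes; the tensor and variable names below are illustrative and not taken from the PR diff:

```python
import torch

token_positions = torch.arange(12)
block_size = 4

# Deprecated pattern that triggers the __floordiv__ warning:
# block_index = token_positions // block_size

# Explicit floor division via torch.div:
block_index = torch.div(token_positions, block_size, rounding_mode="floor")
print(block_index)  # tensor([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
```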
./src/transformers/models/speech_encoder_decoder/convert_mbart_wav2vec2_seq2seq_original_to_pytorch.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Wav2Vec2 checkpoint.""" import argparse import fairseq import torch from torch import nn from transformers import ( MBart50Tokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, Wav2Vec2Config, Wav2Vec2FeatureExtractor, Wav2Vec2Model, logging, ) logging.set_verbosity_info() logger = logging.get_logger(__name__) MAPPING = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } TOP_LEVEL_KEYS = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def set_recursively(hf_pointer, key, value, full_name, weight_type): for attribute in key.split("."): hf_pointer = getattr(hf_pointer, attribute) if weight_type is not None: hf_shape = getattr(hf_pointer, weight_type).shape else: hf_shape = hf_pointer.shape assert hf_shape == value.shape, ( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": hf_pointer.weight.data = value elif weight_type == "weight_g": hf_pointer.weight_g.data = value elif weight_type == "weight_v": hf_pointer.weight_v.data = value elif weight_type == "bias": hf_pointer.bias.data = value else: hf_pointer.data = value logger.info(f"{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.") def recursively_load_weights_wav2vec2(fairseq_model, hf_model): unused_weights = [] fairseq_dict = fairseq_model.state_dict() feature_extractor = hf_model.feature_extractor adapter = hf_model.adapter for name, value in fairseq_dict.items(): is_used = False if "conv_layers" in name: load_conv_layer( name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", ) is_used = True elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]): load_adapter(name, value, adapter, unused_weights) is_used = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: is_used = True if "*" in mapped_key: layer_index = name.split(key)[0].split(".")[-2] mapped_key = mapped_key.replace("*", layer_index) if "weight_g" in name: weight_type = "weight_g" elif "weight_v" in name: weight_type = "weight_v" elif "bias" in name: weight_type = "bias" elif "weight" in name: weight_type = "weight" else: weight_type = None set_recursively(hf_model, mapped_key, value, name, weight_type) continue if not is_used: unused_weights.append(name) logger.warning(f"Unused weights: {unused_weights}") def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm): name = full_name.split("conv_layers.")[-1] items = name.split(".") layer_id = int(items[0]) type_id = int(items[1]) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) feature_extractor.conv_layers[layer_id].conv.bias.data = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) feature_extractor.conv_layers[layer_id].conv.weight.data = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." 
) feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.") else: unused_weights.append(full_name) def load_adapter(full_name, value, adapter, unused_weights): name = full_name.split("adaptor.")[-1] items = name.split(".") if items[1].isdigit(): layer_id = int(items[1]) else: layer_id = None if "adaptor" not in full_name: if "proj_ln" in full_name: # has to be layer norm if "bias" in name: assert ( value.shape == adapter.proj_layer_norm.bias.data.shape ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found." adapter.proj_layer_norm.bias.data = value logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.") if "weight" in name: assert ( value.shape == adapter.proj_layer_norm.weight.data.shape ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found." adapter.proj_layer_norm.weight.data = value else: # has to be projection layer if "bias" in name: assert ( value.shape == adapter.proj.bias.data.shape ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found." adapter.proj.bias.data = value logger.info(f"Adapter proj layer bias was initialized from {full_name}.") if "weight" in name: assert ( value.shape == adapter.proj.weight.data.shape ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found." adapter.proj.weight.data = value logger.info(f"Adapter proj layer weight was initialized from {full_name}.") elif isinstance(layer_id, int): if "bias" in name: assert ( value.shape == adapter.layers[layer_id].conv.bias.data.shape ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found." adapter.layers[layer_id].conv.bias.data = value logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.") elif "weight" in name: assert ( value.shape == adapter.layers[layer_id].conv.weight.data.shape ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found." adapter.layers[layer_id].conv.weight.data = value logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.") else: unused_weights.append(full_name) def make_linear_from_emb(emb): vocab_size, emb_size = emb.weight.shape lin_layer = nn.Linear(vocab_size, emb_size, bias=False) lin_layer.weight.data = emb.weight.data return lin_layer @torch.no_grad() def convert_wav2vec2_checkpoint( checkpoint_path, pytorch_dump_folder_path, dict_path, config_yaml_path, encoder_config_path, decoder_config_path, add_adapter, adapter_kernel_size, adapter_stride, decoder_start_token_id, encoder_output_dim, ): """ Copy/paste/tweak model's weights to transformers design. 
""" # load configs encoder_config = Wav2Vec2Config.from_pretrained( encoder_config_path, add_adapter=True, adapter_stride=adapter_stride, adapter_kernel_size=adapter_kernel_size, use_auth_token=True, output_hidden_size=encoder_output_dim, ) decoder_config = MBartConfig.from_pretrained(decoder_config_path) # load model model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={ "config_yaml": config_yaml_path, "data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path, "load_pretrained_decoder_from": None, }, ) model = model[0].eval() # load feature extractor feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True) # set weights for wav2vec2 encoder hf_encoder = Wav2Vec2Model(encoder_config) recursively_load_weights_wav2vec2(model.encoder, hf_encoder) # load decoder weights hf_decoder = MBartForCausalLM(decoder_config) missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False) logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}") logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}") hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder) hf_wav2vec.config.tie_word_embeddings = False tokenizer = MBart50Tokenizer(dict_path) tokenizer.save_pretrained(pytorch_dump_folder_path) config = hf_wav2vec.config.to_dict() config["pad_token_id"] = tokenizer.pad_token_id config["bos_token_id"] = tokenizer.bos_token_id config["eos_token_id"] = tokenizer.eos_token_id config["tokenizer_class"] = "mbart50" config["feature_extractor_type"] = "wav2vec2" config["decoder_start_token_id"] = tokenizer.eos_token_id config["forced_bos_token_id"] = 250004 config["forced_eos_token_id"] = tokenizer.eos_token_id hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config) hf_wav2vec.save_pretrained(pytorch_dump_folder_path) feature_extractor.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model") parser.add_argument( "--encoder_config_path", default="facebook/wav2vec2-xls-r-1b", type=str, help="Path to hf encoder wav2vec2 checkpoint config", ) parser.add_argument( "--decoder_config_path", default="facebook/mbart-large-50-one-to-many-mmt", type=str, help="Path to hf decoder checkpoint config", ) parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers") parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers") parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers") parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim") parser.add_argument("--start_token_id", default=250004, type=int, help="`decoder_start_token_id` of model config") args = parser.parse_args() convert_wav2vec2_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, 
encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size, adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert Wav2Vec2 checkpoint.""" import argparse import fairseq import torch from torch import nn from transformers import ( MBart50Tokenizer, MBartConfig, MBartForCausalLM, SpeechEncoderDecoderConfig, SpeechEncoderDecoderModel, Wav2Vec2Config, Wav2Vec2FeatureExtractor, Wav2Vec2Model, logging, ) logging.set_verbosity_info() logger = logging.get_logger(__name__) MAPPING = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } TOP_LEVEL_KEYS = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", ] def set_recursively(hf_pointer, key, value, full_name, weight_type): for attribute in key.split("."): hf_pointer = getattr(hf_pointer, attribute) if weight_type is not None: hf_shape = getattr(hf_pointer, weight_type).shape else: hf_shape = hf_pointer.shape assert hf_shape == value.shape, ( f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be" f" {value.shape} for {full_name}" ) if weight_type == "weight": hf_pointer.weight.data = value elif weight_type == "weight_g": hf_pointer.weight_g.data = value elif weight_type == "weight_v": hf_pointer.weight_v.data = value elif weight_type == "bias": hf_pointer.bias.data = value else: hf_pointer.data = value logger.info(f"{key + '.' 
+ weight_type if weight_type is not None else ''} was initialized from {full_name}.") def recursively_load_weights_wav2vec2(fairseq_model, hf_model): unused_weights = [] fairseq_dict = fairseq_model.state_dict() feature_extractor = hf_model.feature_extractor adapter = hf_model.adapter for name, value in fairseq_dict.items(): is_used = False if "conv_layers" in name: load_conv_layer( name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group", ) is_used = True elif any(x in name for x in ["adaptor", "w2v_encoder.proj.", "w2v_proj_ln."]): load_adapter(name, value, adapter, unused_weights) is_used = True else: for key, mapped_key in MAPPING.items(): if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]: is_used = True if "*" in mapped_key: layer_index = name.split(key)[0].split(".")[-2] mapped_key = mapped_key.replace("*", layer_index) if "weight_g" in name: weight_type = "weight_g" elif "weight_v" in name: weight_type = "weight_v" elif "bias" in name: weight_type = "bias" elif "weight" in name: weight_type = "weight" else: weight_type = None set_recursively(hf_model, mapped_key, value, name, weight_type) continue if not is_used: unused_weights.append(name) logger.warning(f"Unused weights: {unused_weights}") def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm): name = full_name.split("conv_layers.")[-1] items = name.split(".") layer_id = int(items[0]) type_id = int(items[1]) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." ) feature_extractor.conv_layers[layer_id].conv.bias.data = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." ) feature_extractor.conv_layers[layer_id].conv.weight.data = value logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.") elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was" " found." ) feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.") elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"{full_name} has size {value.shape}, but" f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found." 
) feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.") else: unused_weights.append(full_name) def load_adapter(full_name, value, adapter, unused_weights): name = full_name.split("adaptor.")[-1] items = name.split(".") if items[1].isdigit(): layer_id = int(items[1]) else: layer_id = None if "adaptor" not in full_name: if "proj_ln" in full_name: # has to be layer norm if "bias" in name: assert ( value.shape == adapter.proj_layer_norm.bias.data.shape ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.bias.data.shape} was found." adapter.proj_layer_norm.bias.data = value logger.info(f"Adapter proj layer norm bias was initialized from {full_name}.") if "weight" in name: assert ( value.shape == adapter.proj_layer_norm.weight.data.shape ), f"{full_name} has size {value.shape}, but {adapter.proj_layer_norm.weight.data.shape} was found." adapter.proj_layer_norm.weight.data = value else: # has to be projection layer if "bias" in name: assert ( value.shape == adapter.proj.bias.data.shape ), f"{full_name} has size {value.shape}, but {adapter.proj.bias.data.shape} was found." adapter.proj.bias.data = value logger.info(f"Adapter proj layer bias was initialized from {full_name}.") if "weight" in name: assert ( value.shape == adapter.proj.weight.data.shape ), f"{full_name} has size {value.shape}, but {adapter.proj.weight.data.shape} was found." adapter.proj.weight.data = value logger.info(f"Adapter proj layer weight was initialized from {full_name}.") elif isinstance(layer_id, int): if "bias" in name: assert ( value.shape == adapter.layers[layer_id].conv.bias.data.shape ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.bias.data.shape} was found." adapter.layers[layer_id].conv.bias.data = value logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.") elif "weight" in name: assert ( value.shape == adapter.layers[layer_id].conv.weight.data.shape ), f"{full_name} has size {value.shape}, but {adapter.layers[layer_id].conv.weight.data.shape} was found." adapter.layers[layer_id].conv.weight.data = value logger.info(f"Adapter layer {layer_id} bias was initialized from {full_name}.") else: unused_weights.append(full_name) def make_linear_from_emb(emb): vocab_size, emb_size = emb.weight.shape lin_layer = nn.Linear(vocab_size, emb_size, bias=False) lin_layer.weight.data = emb.weight.data return lin_layer @torch.no_grad() def convert_wav2vec2_checkpoint( checkpoint_path, pytorch_dump_folder_path, dict_path, config_yaml_path, encoder_config_path, decoder_config_path, add_adapter, adapter_kernel_size, adapter_stride, decoder_start_token_id, encoder_output_dim, ): """ Copy/paste/tweak model's weights to transformers design. 
""" # load configs encoder_config = Wav2Vec2Config.from_pretrained( encoder_config_path, add_adapter=True, adapter_stride=adapter_stride, adapter_kernel_size=adapter_kernel_size, use_auth_token=True, output_hidden_size=encoder_output_dim, ) decoder_config = MBartConfig.from_pretrained(decoder_config_path) # load model model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path], arg_overrides={ "config_yaml": config_yaml_path, "data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path, "load_pretrained_decoder_from": None, }, ) model = model[0].eval() # load feature extractor feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(encoder_config_path, use_auth_token=True) # set weights for wav2vec2 encoder hf_encoder = Wav2Vec2Model(encoder_config) recursively_load_weights_wav2vec2(model.encoder, hf_encoder) # load decoder weights hf_decoder = MBartForCausalLM(decoder_config) missing_keys, unexpected_keys = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict(), strict=False) logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}") logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}") hf_wav2vec = SpeechEncoderDecoderModel(encoder=hf_encoder, decoder=hf_decoder) hf_wav2vec.config.tie_word_embeddings = False tokenizer = MBart50Tokenizer(dict_path) tokenizer.save_pretrained(pytorch_dump_folder_path) config = hf_wav2vec.config.to_dict() config["pad_token_id"] = tokenizer.pad_token_id config["bos_token_id"] = tokenizer.bos_token_id config["eos_token_id"] = tokenizer.eos_token_id config["tokenizer_class"] = "mbart50" config["feature_extractor_type"] = "wav2vec2" config["decoder_start_token_id"] = tokenizer.eos_token_id config["forced_bos_token_id"] = 250004 config["forced_eos_token_id"] = tokenizer.eos_token_id hf_wav2vec.config = SpeechEncoderDecoderConfig.from_dict(config) hf_wav2vec.save_pretrained(pytorch_dump_folder_path) feature_extractor.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_yaml_path", default=None, type=str, help="Path to yaml file of fine-tuned model") parser.add_argument( "--encoder_config_path", default="facebook/wav2vec2-xls-r-1b", type=str, help="Path to hf encoder wav2vec2 checkpoint config", ) parser.add_argument( "--decoder_config_path", default="facebook/mbart-large-50-one-to-many-mmt", type=str, help="Path to hf decoder checkpoint config", ) parser.add_argument("--add_adapter", default=True, type=bool, help="whethere to add model adapter layers") parser.add_argument("--adapter_stride", default=2, type=int, help="stride of adapter layers") parser.add_argument("--adapter_kernel_size", default=3, type=int, help="kernel size of adapter layers") parser.add_argument("--encoder_output_dim", default=1024, type=int, help="encoder output dim") parser.add_argument("--start_token_id", default=250004, type=int, help="`decoder_start_token_id` of model config") args = parser.parse_args() convert_wav2vec2_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.dict_path, args.config_yaml_path, 
encoder_config_path=args.encoder_config_path, decoder_config_path=args.decoder_config_path, add_adapter=args.add_adapter, adapter_kernel_size=args.adapter_kernel_size, adapter_stride=args.adapter_stride, decoder_start_token_id=args.start_token_id, encoder_output_dim=args.encoder_output_dim, )
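The wildcard entries in MAPPING are resolved by splicing the fairseq layer index into the Hugging Face parameter path. A small, self-contained sketch of that substitution, using a one-entry copy of the mapping and a made-up parameter name:

```python
# One entry copied from the MAPPING table above; the parameter name is made up.
mapping = {"self_attn.k_proj": "encoder.layers.*.attention.k_proj"}

name = "w2v_model.encoder.layers.3.self_attn.k_proj.weight"
for key, mapped_key in mapping.items():
    if key in name:
        # The layer index sits just before the matched key in the fairseq name.
        layer_index = name.split(key)[0].split(".")[-2]
        mapped_key = mapped_key.replace("*", layer_index)
        print(mapped_key)  # encoder.layers.3.attention.k_proj
```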
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
./src/transformers/models/electra/configuration_electra.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ ELECTRA model configuration""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP = { "google/electra-small-generator": "https://huggingface.co/google/electra-small-generator/resolve/main/config.json", "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/config.json", "google/electra-large-generator": "https://huggingface.co/google/electra-large-generator/resolve/main/config.json", "google/electra-small-discriminator": ( "https://huggingface.co/google/electra-small-discriminator/resolve/main/config.json" ), "google/electra-base-discriminator": ( "https://huggingface.co/google/electra-base-discriminator/resolve/main/config.json" ), "google/electra-large-discriminator": ( "https://huggingface.co/google/electra-large-discriminator/resolve/main/config.json" ), } class ElectraConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ElectraModel`] or a [`TFElectraModel`]. It is used to instantiate a ELECTRA model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ELECTRA [google/electra-small-discriminator](https://huggingface.co/google/electra-small-discriminator) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the ELECTRA model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`ElectraModel`] or [`TFElectraModel`]. embedding_size (`int`, *optional*, defaults to 128): Dimensionality of the encoder layers and the pooler layer. hidden_size (`int`, *optional*, defaults to 256): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 4): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 1024): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. 
hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`ElectraModel`] or [`TFElectraModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. summary_type (`str`, *optional*, defaults to `"first"`): Argument used when doing sequence summary. Used in the sequence classification and multiple choice models. Has to be one of the following options: - `"last"`: Take the last token hidden state (like XLNet). - `"first"`: Take the first token hidden state (like BERT). - `"mean"`: Take the mean of all tokens hidden states. - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2). - `"attn"`: Not implemented now, use multi-head attention. summary_use_proj (`bool`, *optional*, defaults to `True`): Argument used when doing sequence summary. Used in the sequence classification and multiple choice models. Whether or not to add a projection after the vector extraction. summary_activation (`str`, *optional*): Argument used when doing sequence summary. Used in the sequence classification and multiple choice models. Pass `"gelu"` for a gelu activation to the output, any other value will result in no activation. summary_last_dropout (`float`, *optional*, defaults to 0.0): Argument used when doing sequence summary. Used in the sequence classification and multiple choice models. The dropout ratio to be used after the projection and activation. position_embedding_type (`str`, *optional*, defaults to `"absolute"`): Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. classifier_dropout (`float`, *optional*): The dropout ratio for the classification head. 
Examples: ```python >>> from transformers import ElectraConfig, ElectraModel >>> # Initializing a ELECTRA electra-base-uncased style configuration >>> configuration = ElectraConfig() >>> # Initializing a model (with random weights) from the electra-base-uncased style configuration >>> model = ElectraModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "electra" def __init__( self, vocab_size=30522, embedding_size=128, hidden_size=256, num_hidden_layers=12, num_attention_heads=4, intermediate_size=1024, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, summary_type="first", summary_use_proj=True, summary_activation="gelu", summary_last_dropout=0.1, pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.embedding_size = embedding_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.summary_type = summary_type self.summary_use_proj = summary_use_proj self.summary_activation = summary_activation self.summary_last_dropout = summary_last_dropout self.position_embedding_type = position_embedding_type self.use_cache = use_cache self.classifier_dropout = classifier_dropout class ElectraOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"} else: dynamic_axis = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis), ] )
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ ELECTRA model configuration""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP = { "google/electra-small-generator": "https://huggingface.co/google/electra-small-generator/resolve/main/config.json", "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/config.json", "google/electra-large-generator": "https://huggingface.co/google/electra-large-generator/resolve/main/config.json", "google/electra-small-discriminator": ( "https://huggingface.co/google/electra-small-discriminator/resolve/main/config.json" ), "google/electra-base-discriminator": ( "https://huggingface.co/google/electra-base-discriminator/resolve/main/config.json" ), "google/electra-large-discriminator": ( "https://huggingface.co/google/electra-large-discriminator/resolve/main/config.json" ), } class ElectraConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`ElectraModel`] or a [`TFElectraModel`]. It is used to instantiate a ELECTRA model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the ELECTRA [google/electra-small-discriminator](https://huggingface.co/google/electra-small-discriminator) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the ELECTRA model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`ElectraModel`] or [`TFElectraModel`]. embedding_size (`int`, *optional*, defaults to 128): Dimensionality of the encoder layers and the pooler layer. hidden_size (`int`, *optional*, defaults to 256): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 4): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 1024): Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. 
hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`ElectraModel`] or [`TFElectraModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): The epsilon used by the layer normalization layers. summary_type (`str`, *optional*, defaults to `"first"`): Argument used when doing sequence summary. Used in the sequence classification and multiple choice models. Has to be one of the following options: - `"last"`: Take the last token hidden state (like XLNet). - `"first"`: Take the first token hidden state (like BERT). - `"mean"`: Take the mean of all tokens hidden states. - `"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2). - `"attn"`: Not implemented now, use multi-head attention. summary_use_proj (`bool`, *optional*, defaults to `True`): Argument used when doing sequence summary. Used in the sequence classification and multiple choice models. Whether or not to add a projection after the vector extraction. summary_activation (`str`, *optional*): Argument used when doing sequence summary. Used in the sequence classification and multiple choice models. Pass `"gelu"` for a gelu activation to the output, any other value will result in no activation. summary_last_dropout (`float`, *optional*, defaults to 0.0): Argument used when doing sequence summary. Used in the sequence classification and multiple choice models. The dropout ratio to be used after the projection and activation. position_embedding_type (`str`, *optional*, defaults to `"absolute"`): Type of position embedding. Choose one of `"absolute"`, `"relative_key"`, `"relative_key_query"`. For positional embeddings use `"absolute"`. For more information on `"relative_key"`, please refer to [Self-Attention with Relative Position Representations (Shaw et al.)](https://arxiv.org/abs/1803.02155). For more information on `"relative_key_query"`, please refer to *Method 4* in [Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)](https://arxiv.org/abs/2009.13658). use_cache (`bool`, *optional*, defaults to `True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if `config.is_decoder=True`. classifier_dropout (`float`, *optional*): The dropout ratio for the classification head. 
Examples: ```python >>> from transformers import ElectraConfig, ElectraModel >>> # Initializing a ELECTRA electra-base-uncased style configuration >>> configuration = ElectraConfig() >>> # Initializing a model (with random weights) from the electra-base-uncased style configuration >>> model = ElectraModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ```""" model_type = "electra" def __init__( self, vocab_size=30522, embedding_size=128, hidden_size=256, num_hidden_layers=12, num_attention_heads=4, intermediate_size=1024, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, summary_type="first", summary_use_proj=True, summary_activation="gelu", summary_last_dropout=0.1, pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs, ): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.embedding_size = embedding_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.summary_type = summary_type self.summary_use_proj = summary_use_proj self.summary_activation = summary_activation self.summary_last_dropout = summary_last_dropout self.position_embedding_type = position_embedding_type self.use_cache = use_cache self.classifier_dropout = classifier_dropout class ElectraOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"} else: dynamic_axis = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis), ] )
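A short sketch showing how the documented `position_embedding_type` option is passed when building a configuration; the sizes below are the defaults documented above:

```python
from transformers import ElectraConfig, ElectraModel

# Small discriminator-sized configuration using relative-position attention.
config = ElectraConfig(
    embedding_size=128,
    hidden_size=256,
    num_attention_heads=4,
    position_embedding_type="relative_key",
)
model = ElectraModel(config)  # randomly initialized weights
print(config.position_embedding_type)  # relative_key
```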
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
./src/transformers/models/led/tokenization_led.py
# coding=utf-8 # Copyright 2021 Iz Beltagy, Matthew E. Peters, Arman Cohan and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for LED.""" import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all LED models at https://huggingface.co/models?filter=LED PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "allenai/led-base-16384": 16384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def bytes_to_unicode(): """ Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control characters the bpe code barfs on. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. """ bs = ( list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) ) cs = bs[:] n = 0 for b in range(2**8): if b not in bs: bs.append(b) cs.append(2**8 + n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) # Copied from transformers.models.bart.tokenization_bart.get_pairs def get_pairs(word): """ Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings). """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs class LEDTokenizer(PreTrainedTokenizer): """ Constructs a LED tokenizer, which is smilar to the ROBERTa tokenizer, using byte-level Byte-Pair-Encoding. 
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not: ``` >>> from transformers import LEDTokenizer >>> tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384") >>> tokenizer("Hello world")['input_ids'] [0, 31414, 232, 2] >>> tokenizer(" Hello world")['input_ids'] [0, 20920, 232, 2] ``` You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. <Tip> When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one). </Tip> This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (BART tokenizer detect beginning of words by the preceding space). 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.__init__ def __init__( self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs, ): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token # Mask token behave like a normal word, i.e. include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token super().__init__( errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, ) with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} self.errors = errors # how to handle errors in decoding self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} with open(merges_file, encoding="utf-8") as merges_handle: bpe_merges = merges_handle.read().split("\n")[1:-1] bpe_merges = [tuple(merge.split()) for merge in bpe_merges] self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {} self.add_prefix_space = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def vocab_size(self): return len(self.encoder) # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.get_vocab def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.bpe def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = " ".join(word) self.cache[token] = word return word # Copied from 
transformers.models.bart.tokenization_bart.BartTokenizer._tokenize def _tokenize(self, text): """Tokenize a string.""" bpe_tokens = [] for token in re.findall(self.pat, text): token = "".join( self.byte_encoder[b] for b in token.encode("utf-8") ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ")) return bpe_tokens # Copied from transformers.models.bart.tokenization_bart.BartTokenizer._convert_token_to_id def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) # Copied from transformers.models.bart.tokenization_bart.BartTokenizer._convert_id_to_token def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index) # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.convert_tokens_to_string def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" text = "".join(tokens) text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) return text # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.save_vocabulary def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) merge_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") index = 0 with open(merge_file, "w", encoding="utf-8") as writer: writer.write("#version: 0.2\n") for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) index = token_index writer.write(" ".join(bpe_tokens) + "\n") index += 1 return vocab_file, merge_file # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.build_inputs_with_special_tokens with BART->LED def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A LED sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
""" if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.get_special_tokens_mask def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.create_token_type_ids_from_sequences with BART->LED def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. LED does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.prepare_for_tokenization def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()): text = " " + text return (text, kwargs) def _pad( self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ) -> dict: encoded_inputs = super()._pad( encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) # Load from model defaults if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: required_input = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input) if needs_to_be_padded: difference = len(required_input) - len(encoded_inputs["global_attention_mask"]) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` encoded_inputs["global_attention_mask"] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side)) return encoded_inputs
# coding=utf-8 # Copyright 2021 Iz Beltagy, Matthew E. Peters, Arman Cohan and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for LED.""" import json import os from functools import lru_cache from typing import Dict, List, Optional, Tuple, Union import regex as re from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...tokenization_utils_base import BatchEncoding, EncodedInput from ...utils import PaddingStrategy, logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"} # See all LED models at https://huggingface.co/models?filter=LED PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json", }, "merges_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt", }, "tokenizer_file": { "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "allenai/led-base-16384": 16384, } @lru_cache() # Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode def bytes_to_unicode(): """ Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control characters the bpe code barfs on. The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup tables between utf-8 bytes and unicode strings. """ bs = ( list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1)) ) cs = bs[:] n = 0 for b in range(2**8): if b not in bs: bs.append(b) cs.append(2**8 + n) n += 1 cs = [chr(n) for n in cs] return dict(zip(bs, cs)) # Copied from transformers.models.bart.tokenization_bart.get_pairs def get_pairs(word): """ Return set of symbol pairs in a word. Word is represented as tuple of symbols (symbols being variable-length strings). """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs class LEDTokenizer(PreTrainedTokenizer): """ Constructs a LED tokenizer, which is smilar to the ROBERTa tokenizer, using byte-level Byte-Pair-Encoding. 
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will be encoded differently whether it is at the beginning of the sentence (without space) or not: ``` >>> from transformers import LEDTokenizer >>> tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384") >>> tokenizer("Hello world")['input_ids'] [0, 31414, 232, 2] >>> tokenizer(" Hello world")['input_ids'] [0, 20920, 232, 2] ``` You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer or when you call it on some text, but since the model was not pretrained this way, it might yield a decrease in performance. <Tip> When used with `is_split_into_words=True`, this tokenizer will add a space before each word (even the first one). </Tip> This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (`str`): Path to the vocabulary file. merges_file (`str`): Path to the merges file. errors (`str`, *optional*, defaults to `"replace"`): Paradigm to follow when decoding bytes to UTF-8. See [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information. bos_token (`str`, *optional*, defaults to `"<s>"`): The beginning of sequence token that was used during pretraining. Can be used a sequence classifier token. <Tip> When building a sequence using special tokens, this is not the token that is used for the beginning of sequence. The token used is the `cls_token`. </Tip> eos_token (`str`, *optional*, defaults to `"</s>"`): The end of sequence token. <Tip> When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the `sep_token`. </Tip> sep_token (`str`, *optional*, defaults to `"</s>"`): The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for sequence classification or for a text and a question for question answering. It is also used as the last token of a sequence built with special tokens. cls_token (`str`, *optional*, defaults to `"<s>"`): The classifier token which is used when doing sequence classification (classification of the whole sequence instead of per-token classification). It is the first token of the sequence when built with special tokens. unk_token (`str`, *optional*, defaults to `"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (`str`, *optional*, defaults to `"<pad>"`): The token used for padding, for example when batching sequences of different lengths. mask_token (`str`, *optional*, defaults to `"<mask>"`): The token used for masking values. This is the token used when training this model with masked language modeling. This is the token which the model will try to predict. add_prefix_space (`bool`, *optional*, defaults to `False`): Whether or not to add an initial space to the input. This allows to treat the leading word just as any other word. (BART tokenizer detect beginning of words by the preceding space). 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.__init__ def __init__( self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs, ): bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token # Mask token behave like a normal word, i.e. include the space before it mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token super().__init__( errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs, ) with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} self.errors = errors # how to handle errors in decoding self.byte_encoder = bytes_to_unicode() self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} with open(merges_file, encoding="utf-8") as merges_handle: bpe_merges = merges_handle.read().split("\n")[1:-1] bpe_merges = [tuple(merge.split()) for merge in bpe_merges] self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges)))) self.cache = {} self.add_prefix_space = add_prefix_space # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""") @property # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size def vocab_size(self): return len(self.encoder) # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.get_vocab def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.bpe def bpe(self, token): if token in self.cache: return self.cache[token] word = tuple(token) pairs = get_pairs(word) if not pairs: return token while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = " ".join(word) self.cache[token] = word return word # Copied from 
transformers.models.bart.tokenization_bart.BartTokenizer._tokenize def _tokenize(self, text): """Tokenize a string.""" bpe_tokens = [] for token in re.findall(self.pat, text): token = "".join( self.byte_encoder[b] for b in token.encode("utf-8") ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case) bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" ")) return bpe_tokens # Copied from transformers.models.bart.tokenization_bart.BartTokenizer._convert_token_to_id def _convert_token_to_id(self, token): """Converts a token (str) in an id using the vocab.""" return self.encoder.get(token, self.encoder.get(self.unk_token)) # Copied from transformers.models.bart.tokenization_bart.BartTokenizer._convert_id_to_token def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" return self.decoder.get(index) # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.convert_tokens_to_string def convert_tokens_to_string(self, tokens): """Converts a sequence of tokens (string) in a single string.""" text = "".join(tokens) text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors) return text # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.save_vocabulary def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error(f"Vocabulary path ({save_directory}) should be a directory") return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) merge_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n") index = 0 with open(merge_file, "w", encoding="utf-8") as writer: writer.write("#version: 0.2\n") for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!" ) index = token_index writer.write(" ".join(bpe_tokens) + "\n") index += 1 return vocab_file, merge_file # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.build_inputs_with_special_tokens with BART->LED def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. A LED sequence has the following format: - single sequence: `<s> X </s>` - pair of sequences: `<s> A </s></s> B </s>` Args: token_ids_0 (`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens. 
""" if token_ids_1 is None: return [self.cls_token_id] + token_ids_0 + [self.sep_token_id] cls = [self.cls_token_id] sep = [self.sep_token_id] return cls + token_ids_0 + sep + sep + token_ids_1 + sep # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.get_special_tokens_mask def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer `prepare_for_model` method. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. already_has_special_tokens (`bool`, *optional*, defaults to `False`): Whether or not the token list is already formatted with special tokens for the model. Returns: `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True ) if token_ids_1 is None: return [1] + ([0] * len(token_ids_0)) + [1] return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1] # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.create_token_type_ids_from_sequences with BART->LED def create_token_type_ids_from_sequences( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Create a mask from the two sequences passed to be used in a sequence-pair classification task. LED does not make use of token type ids, therefore a list of zeros is returned. Args: token_ids_0 (`List[int]`): List of IDs. token_ids_1 (`List[int]`, *optional*): Optional second list of IDs for sequence pairs. Returns: `List[int]`: List of zeros. """ sep = [self.sep_token_id] cls = [self.cls_token_id] if token_ids_1 is None: return len(cls + token_ids_0 + sep) * [0] return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0] # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.prepare_for_tokenization def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs): add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space) if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()): text = " " + text return (text, kwargs) def _pad( self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ) -> dict: encoded_inputs = super()._pad( encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, ) # Load from model defaults if return_attention_mask is None: return_attention_mask = "attention_mask" in self.model_input_names if return_attention_mask and "global_attention_mask" in encoded_inputs: required_input = encoded_inputs[self.model_input_names[0]] # `global_attention_mask` need to have the same length as other (sequential) inputs. 
needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input) if needs_to_be_padded: difference = len(required_input) - len(encoded_inputs["global_attention_mask"]) if self.padding_side == "right": # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend` encoded_inputs["global_attention_mask"] = ( encoded_inputs["global_attention_mask"] + [-1] * difference ) elif self.padding_side == "left": encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[ "global_attention_mask" ] else: raise ValueError("Invalid padding strategy:" + str(self.padding_side)) return encoded_inputs
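A small usage sketch of the `global_attention_mask` padding behavior implemented in `_pad` above; the sentences are arbitrary examples, and the checkpoint is the one already referenced in this tokenizer file:

```python
from transformers import LEDTokenizer

tokenizer = LEDTokenizer.from_pretrained("allenai/led-base-16384")
encoded = tokenizer(["Summary of the text.", "Another summary."], padding=False)

# Mark every real token for local attention (0), then let pad() extend the masks.
encoded["global_attention_mask"] = [[0] * len(ids) for ids in encoded["input_ids"]]
padded = tokenizer.pad(encoded)

# The shorter sequence's global_attention_mask is right-padded with -1,
# mirroring how its input_ids are padded (0 already means "local attention").
print(padded["global_attention_mask"])
```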
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div` instead.
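A minimal sketch of the migration this description refers to; the tensor values and block size are made up for illustration and are not taken from the PR diff:

```python
import torch

seq_len = torch.tensor([10, 17, 33])
block_size = 4

# Old pattern that triggers the __floordiv__ deprecation warning:
# num_blocks = seq_len // block_size

# Replacement: explicit floor division via torch.div
num_blocks = torch.div(seq_len, block_size, rounding_mode="floor")
print(num_blocks)  # tensor([2, 4, 8])
```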
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/led/test_tokenization_led.py
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = LEDTokenizer rust_tokenizer_class = LEDTokenizerFast test_rust_tokenizer = True def setUp(self): super().setUp() vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] self.special_tokens_map = {"unk_token": "<unk>"} self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) def get_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_rust_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self, tokenizer): return "lower newer", "lower newer" @cached_property def default_tokenizer(self): return LEDTokenizer.from_pretrained("allenai/led-base-16384") @cached_property def default_tokenizer_fast(self): return LEDTokenizerFast.from_pretrained("allenai/led-base-16384") @require_torch def test_prepare_batch(self): src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt") self.assertIsInstance(batch, BatchEncoding) self.assertEqual((2, 9), batch.input_ids.shape) self.assertEqual((2, 9), batch.attention_mask.shape) result = batch.input_ids.tolist()[0] self.assertListEqual(expected_src_tokens, result) @require_torch def test_prepare_batch_empty_target_text(self): src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: batch = tokenizer(src_text, padding=True, return_tensors="pt") self.assertIn("input_ids", batch) self.assertIn("attention_mask", batch) self.assertNotIn("labels", batch) self.assertNotIn("decoder_attention_mask", batch) 
@require_torch def test_tokenizer_as_target_length(self): tgt_text = [ "Summary of the text.", "Another summary.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt") self.assertEqual(32, targets["input_ids"].shape[1]) @require_torch def test_prepare_batch_not_longer_than_maxlen(self): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: batch = tokenizer( ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt" ) self.assertIsInstance(batch, BatchEncoding) self.assertEqual(batch.input_ids.shape, (2, 5122)) @require_torch def test_special_tokens(self): src_text = ["A long paragraph for summarization."] tgt_text = [ "Summary of the text.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: inputs = tokenizer(src_text, return_tensors="pt") targets = tokenizer(text_target=tgt_text, return_tensors="pt") input_ids = inputs["input_ids"] labels = targets["input_ids"] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item()) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item()) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item()) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item()) @require_torch def test_global_attention_mask(self): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: src_text = ["Summary of the text.", "Another summary."] expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] encoded_output = tokenizer(src_text, padding=False) encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]] outputs = tokenizer.pad(encoded_output) self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask) def test_pretokenized_inputs(self): pass def test_embeded_special_tokens(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) sentence = "A, <mask> AllenNLP sentence." tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True) tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True) self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"])) self.assertEqual( sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]), sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]), ) tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"]) tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"]) self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual( tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = LEDTokenizer rust_tokenizer_class = LEDTokenizerFast test_rust_tokenizer = True def setUp(self): super().setUp() vocab = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] vocab_tokens = dict(zip(vocab, range(len(vocab)))) merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] self.special_tokens_map = {"unk_token": "<unk>"} self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"]) with open(self.vocab_file, "w", encoding="utf-8") as fp: fp.write(json.dumps(vocab_tokens) + "\n") with open(self.merges_file, "w", encoding="utf-8") as fp: fp.write("\n".join(merges)) def get_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_rust_tokenizer(self, **kwargs): kwargs.update(self.special_tokens_map) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self, tokenizer): return "lower newer", "lower newer" @cached_property def default_tokenizer(self): return LEDTokenizer.from_pretrained("allenai/led-base-16384") @cached_property def default_tokenizer_fast(self): return LEDTokenizerFast.from_pretrained("allenai/led-base-16384") @require_torch def test_prepare_batch(self): src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt") self.assertIsInstance(batch, BatchEncoding) self.assertEqual((2, 9), batch.input_ids.shape) self.assertEqual((2, 9), batch.attention_mask.shape) result = batch.input_ids.tolist()[0] self.assertListEqual(expected_src_tokens, result) @require_torch def test_prepare_batch_empty_target_text(self): src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: batch = tokenizer(src_text, padding=True, return_tensors="pt") self.assertIn("input_ids", batch) self.assertIn("attention_mask", batch) self.assertNotIn("labels", batch) self.assertNotIn("decoder_attention_mask", batch) 
@require_torch def test_tokenizer_as_target_length(self): tgt_text = [ "Summary of the text.", "Another summary.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt") self.assertEqual(32, targets["input_ids"].shape[1]) @require_torch def test_prepare_batch_not_longer_than_maxlen(self): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: batch = tokenizer( ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt" ) self.assertIsInstance(batch, BatchEncoding) self.assertEqual(batch.input_ids.shape, (2, 5122)) @require_torch def test_special_tokens(self): src_text = ["A long paragraph for summarization."] tgt_text = [ "Summary of the text.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: inputs = tokenizer(src_text, return_tensors="pt") targets = tokenizer(text_target=tgt_text, return_tensors="pt") input_ids = inputs["input_ids"] labels = targets["input_ids"] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item()) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item()) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item()) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item()) @require_torch def test_global_attention_mask(self): for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: src_text = ["Summary of the text.", "Another summary."] expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] encoded_output = tokenizer(src_text, padding=False) encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]] outputs = tokenizer.pad(encoded_output) self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask) def test_pretokenized_inputs(self): pass def test_embeded_special_tokens(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs) sentence = "A, <mask> AllenNLP sentence." tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True) tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True) self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"])) self.assertEqual( sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]), sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]), ) tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"]) tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"]) self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2]) self.assertSequenceEqual( tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/vit/feature_extraction_vit.py
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature extractor class for ViT.""" import warnings from ...utils import logging from .image_processing_vit import ViTImageProcessor logger = logging.get_logger(__name__) class ViTFeatureExtractor(ViTImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( "The class ViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use ViTImageProcessor instead.", FutureWarning, ) super().__init__(*args, **kwargs)
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Feature extractor class for ViT.""" import warnings from ...utils import logging from .image_processing_vit import ViTImageProcessor logger = logging.get_logger(__name__) class ViTFeatureExtractor(ViTImageProcessor): def __init__(self, *args, **kwargs) -> None: warnings.warn( "The class ViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use ViTImageProcessor instead.", FutureWarning, ) super().__init__(*args, **kwargs)
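A brief hedged sketch of the replacement the deprecation warning above points to; the checkpoint name and the random image are illustrative placeholders, not taken from this file:

```python
import numpy as np
from transformers import ViTImageProcessor

# ViTImageProcessor is the drop-in replacement for the deprecated ViTFeatureExtractor.
image_processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
image = np.random.randint(0, 256, size=(224, 224, 3), dtype=np.uint8)

inputs = image_processor(images=image, return_tensors="np")
print(inputs["pixel_values"].shape)  # (1, 3, 224, 224)
```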
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/opt/convert_opt_original_pytorch_checkpoint_to_pytorch.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert OPT checkpoint.""" import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def load_checkpoint(checkpoint_path): """Checkpoint path should end in model.pt""" sd = torch.load(checkpoint_path, map_location="cpu") if "model" in sd.keys(): sd = torch.load(checkpoint_path, map_location="cpu")["model"] # pop unnecessary weights keys_to_delete = [ "decoder.version", "decoder.output_projection.weight", ] for key in keys_to_delete: if key in sd: sd.pop(key) keys_to_rename = { "decoder.project_in_dim.weight": "decoder.project_in.weight", "decoder.project_out_dim.weight": "decoder.project_out.weight", "decoder.layer_norm.weight": "decoder.final_layer_norm.weight", "decoder.layer_norm.bias": "decoder.final_layer_norm.bias", } for old_key, new_key in keys_to_rename.items(): if old_key in sd: sd[new_key] = sd.pop(old_key) keys = list(sd.keys()) for key in keys: if ".qkj_proj." in key: value = sd[key] # We split QKV in seperate Q,K,V q_name = key.replace(".qkv_proj.", ".q_proj.") k_name = key.replace(".qkv_proj.", ".k_proj.") v_name = key.replace(".qkv_proj.", ".v_proj.") depth = value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 k, v, q = torch.split(value, depth // 3, dim=0) sd[q_name] = q sd[k_name] = k sd[v_name] = v del sd[key] return sd @torch.no_grad() def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None): """ Copy/paste/tweak model's weights to our BERT structure. """ state_dict = load_checkpoint(checkpoint_path) if config is not None: config = OPTConfig.from_pretrained(config) else: config = OPTConfig() model = OPTModel(config).half().eval() model.load_state_dict(state_dict) # Check results Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--fairseq_path", type=str, help=( "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:" " https://huggingface.co/models?other=opt_metasq" ), ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.") args = parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert OPT checkpoint.""" import argparse from pathlib import Path import torch from transformers import OPTConfig, OPTModel from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) def load_checkpoint(checkpoint_path): """Checkpoint path should end in model.pt""" sd = torch.load(checkpoint_path, map_location="cpu") if "model" in sd.keys(): sd = torch.load(checkpoint_path, map_location="cpu")["model"] # pop unnecessary weights keys_to_delete = [ "decoder.version", "decoder.output_projection.weight", ] for key in keys_to_delete: if key in sd: sd.pop(key) keys_to_rename = { "decoder.project_in_dim.weight": "decoder.project_in.weight", "decoder.project_out_dim.weight": "decoder.project_out.weight", "decoder.layer_norm.weight": "decoder.final_layer_norm.weight", "decoder.layer_norm.bias": "decoder.final_layer_norm.bias", } for old_key, new_key in keys_to_rename.items(): if old_key in sd: sd[new_key] = sd.pop(old_key) keys = list(sd.keys()) for key in keys: if ".qkj_proj." in key: value = sd[key] # We split QKV in seperate Q,K,V q_name = key.replace(".qkv_proj.", ".q_proj.") k_name = key.replace(".qkv_proj.", ".k_proj.") v_name = key.replace(".qkv_proj.", ".v_proj.") depth = value.shape[0] assert depth % 3 == 0 # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming: # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97 k, v, q = torch.split(value, depth // 3, dim=0) sd[q_name] = q sd[k_name] = k sd[v_name] = v del sd[key] return sd @torch.no_grad() def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None): """ Copy/paste/tweak model's weights to our BERT structure. """ state_dict = load_checkpoint(checkpoint_path) if config is not None: config = OPTConfig.from_pretrained(config) else: config = OPTConfig() model = OPTModel(config).half().eval() model.load_state_dict(state_dict) # Check results Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--fairseq_path", type=str, help=( "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:" " https://huggingface.co/models?other=opt_metasq" ), ) parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.") args = parser.parse_args() convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
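For illustration only, a toy sketch of the fused-projection split performed in `load_checkpoint` above; the shapes are made up and far smaller than a real OPT checkpoint, and the K, V, Q ordering follows the comment in that function:

```python
import torch

hidden = 4
# Pretend this is a fused qkv_proj weight with K, V and Q stacked along dim 0.
fused = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)

# Rows 0..hidden-1 are K, hidden..2*hidden-1 are V, the remainder are Q.
k, v, q = torch.split(fused, fused.shape[0] // 3, dim=0)
assert k.shape == v.shape == q.shape == (hidden, hidden)
```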
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./examples/research_projects/movement-pruning/masked_run_squad.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-pruning Masked BERT for question-answering on SQuAD.""" import argparse import glob import logging import os import random import timeit import numpy as np import torch from emmental import MaskedBertConfig, MaskedBertForQuestionAnswering from torch import nn from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from transformers import ( WEIGHTS_NAME, AdamW, BertConfig, BertForQuestionAnswering, BertTokenizer, get_linear_schedule_with_warmup, squad_convert_examples_to_features, ) from transformers.data.metrics.squad_metrics import ( compute_predictions_log_probs, compute_predictions_logits, squad_evaluate, ) from transformers.data.processors.squad import SquadResult, SquadV1Processor, SquadV2Processor try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter logger = logging.getLogger(__name__) MODEL_CLASSES = { "bert": (BertConfig, BertForQuestionAnswering, BertTokenizer), "masked_bert": (MaskedBertConfig, MaskedBertForQuestionAnswering, BertTokenizer), } def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def schedule_threshold( step: int, total_step: int, warmup_steps: int, initial_threshold: float, final_threshold: float, initial_warmup: int, final_warmup: int, final_lambda: float, ): if step <= initial_warmup * warmup_steps: threshold = initial_threshold elif step > (total_step - final_warmup * warmup_steps): threshold = final_threshold else: spars_warmup_steps = initial_warmup * warmup_steps spars_schedu_steps = (final_warmup + initial_warmup) * warmup_steps mul_coeff = 1 - (step - spars_warmup_steps) / (total_step - spars_schedu_steps) threshold = final_threshold + (initial_threshold - final_threshold) * (mul_coeff**3) regu_lambda = final_lambda * threshold / final_threshold return threshold, regu_lambda def regularization(model: nn.Module, mode: str): regu, counter = 0, 0 for name, param in model.named_parameters(): if "mask_scores" in name: if mode == "l1": regu += torch.norm(torch.sigmoid(param), p=1) / param.numel() elif mode == "l0": regu += torch.sigmoid(param - 2 / 3 * np.log(0.1 / 1.1)).sum() / param.numel() else: ValueError("Don't know this mode.") counter += 1 return regu / counter def to_list(tensor): return tensor.detach().cpu().tolist() def train(args, train_dataset, model, tokenizer, teacher=None): """Train the model""" if args.local_rank in [-1, 0]: tb_writer = SummaryWriter(log_dir=args.output_dir) args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = 
DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if "mask_score" in n and p.requires_grad], "lr": args.mask_scores_learning_rate, }, { "params": [ p for n, p in model.named_parameters() if "mask_score" not in n and p.requires_grad and not any(nd in n for nd in no_decay) ], "lr": args.learning_rate, "weight_decay": args.weight_decay, }, { "params": [ p for n, p in model.named_parameters() if "mask_score" not in n and p.requires_grad and any(nd in n for nd in no_decay) ], "lr": args.learning_rate, "weight_decay": 0.0, }, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) # Check if saved optimizer or scheduler states exist if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile( os.path.join(args.model_name_or_path, "scheduler.pt") ): # Load in optimizer and scheduler states optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))) scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True, ) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. 
parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) # Distillation if teacher is not None: logger.info(" Training with distillation") global_step = 1 # Global TopK if args.global_topk: threshold_mem = None epochs_trained = 0 steps_trained_in_current_epoch = 0 # Check if continuing training from a checkpoint if os.path.exists(args.model_name_or_path): # set global_step to global_step of last saved checkpoint from model path try: checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0] global_step = int(checkpoint_suffix) epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps) steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps) logger.info(" Continuing training from checkpoint, will skip to saved global_step") logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", global_step) logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) except ValueError: logger.info(" Starting fine-tuning.") tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange( epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0] ) # Added here for reproducibility set_seed(args) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue model.train() batch = tuple(t.to(args.device) for t in batch) threshold, regu_lambda = schedule_threshold( step=global_step, total_step=t_total, warmup_steps=args.warmup_steps, final_threshold=args.final_threshold, initial_threshold=args.initial_threshold, final_warmup=args.final_warmup, initial_warmup=args.initial_warmup, final_lambda=args.final_lambda, ) # Global TopK if args.global_topk: if threshold == 1.0: threshold = -1e2 # Or an indefinitely low quantity else: if (threshold_mem is None) or (global_step % args.global_topk_frequency_compute == 0): # Sort all the values to get the global topK concat = torch.cat( [param.view(-1) for name, param in model.named_parameters() if "mask_scores" in name] ) n = concat.numel() kth = max(n - (int(n * threshold) + 1), 1) threshold_mem = concat.kthvalue(kth).values.item() threshold = threshold_mem else: threshold = threshold_mem inputs = { "input_ids": batch[0], "attention_mask": batch[1], "token_type_ids": batch[2], "start_positions": batch[3], "end_positions": batch[4], } if args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": batch[5], "p_mask": batch[6]}) if args.version_2_with_negative: inputs.update({"is_impossible": batch[7]}) if hasattr(model, "config") and hasattr(model.config, "lang2id"): inputs.update( {"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)} ) if "masked" in args.model_type: inputs["threshold"] = threshold outputs = model(**inputs) # model outputs are always tuple in transformers (see doc) loss, 
start_logits_stu, end_logits_stu = outputs # Distillation loss if teacher is not None: with torch.no_grad(): start_logits_tea, end_logits_tea = teacher( input_ids=inputs["input_ids"], token_type_ids=inputs["token_type_ids"], attention_mask=inputs["attention_mask"], ) loss_start = nn.functional.kl_div( input=nn.functional.log_softmax(start_logits_stu / args.temperature, dim=-1), target=nn.functional.softmax(start_logits_tea / args.temperature, dim=-1), reduction="batchmean", ) * (args.temperature**2) loss_end = nn.functional.kl_div( input=nn.functional.log_softmax(end_logits_stu / args.temperature, dim=-1), target=nn.functional.softmax(end_logits_tea / args.temperature, dim=-1), reduction="batchmean", ) * (args.temperature**2) loss_logits = (loss_start + loss_end) / 2.0 loss = args.alpha_distil * loss_logits + args.alpha_ce * loss # Regularization if args.regularization is not None: regu_ = regularization(model=model, mode=args.regularization) loss = loss + regu_lambda * regu_ if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: tb_writer.add_scalar("threshold", threshold, global_step) for name, param in model.named_parameters(): if not param.requires_grad: continue tb_writer.add_scalar("parameter_mean/" + name, param.data.mean(), global_step) tb_writer.add_scalar("parameter_std/" + name, param.data.std(), global_step) tb_writer.add_scalar("parameter_min/" + name, param.data.min(), global_step) tb_writer.add_scalar("parameter_max/" + name, param.data.max(), global_step) if "pooler" in name: continue tb_writer.add_scalar("grad_mean/" + name, param.grad.data.mean(), global_step) tb_writer.add_scalar("grad_std/" + name, param.grad.data.std(), global_step) if args.regularization is not None and "mask_scores" in name: if args.regularization == "l1": perc = (torch.sigmoid(param) > threshold).sum().item() / param.numel() elif args.regularization == "l0": perc = (torch.sigmoid(param - 2 / 3 * np.log(0.1 / 1.1))).sum().item() / param.numel() tb_writer.add_scalar("retained_weights_perc/" + name, perc, global_step) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 # Log metrics if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Only evaluate when single GPU otherwise metrics may not average well if args.local_rank == -1 and args.evaluate_during_training: results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar("eval_{}".format(key), value, global_step) learning_rate_scalar = scheduler.get_lr() tb_writer.add_scalar("lr", learning_rate_scalar[0], global_step) if len(learning_rate_scalar) > 1: for idx, lr in enumerate(learning_rate_scalar[1:]): tb_writer.add_scalar(f"lr/{idx+1}", lr, global_step) tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step) if teacher is not None: tb_writer.add_scalar("loss/distil", loss_logits.item(), global_step) if args.regularization is 
not None: tb_writer.add_scalar("loss/regularization", regu_.item(), global_step) if (teacher is not None) or (args.regularization is not None): if (teacher is not None) and (args.regularization is not None): tb_writer.add_scalar( "loss/instant_ce", (loss.item() - regu_lambda * regu_.item() - args.alpha_distil * loss_logits.item()) / args.alpha_ce, global_step, ) elif teacher is not None: tb_writer.add_scalar( "loss/instant_ce", (loss.item() - args.alpha_distil * loss_logits.item()) / args.alpha_ce, global_step, ) else: tb_writer.add_scalar( "loss/instant_ce", loss.item() - regu_lambda * regu_.item(), global_step ) logging_loss = tr_loss # Save model checkpoint if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) # Take care of distributed/parallel training model_to_save = model.module if hasattr(model, "module") else model model_to_save.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu eval if args.n_gpu > 1 and not isinstance(model, nn.DataParallel): model = nn.DataParallel(model) # Eval! 
logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) all_results = [] start_time = timeit.default_timer() # Global TopK if args.global_topk: threshold_mem = None for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = { "input_ids": batch[0], "attention_mask": batch[1], "token_type_ids": batch[2], } if args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] example_indices = batch[3] # XLNet and XLM use more arguments for their predictions if args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": batch[4], "p_mask": batch[5]}) # for lang_id-sensitive xlm models if hasattr(model, "config") and hasattr(model.config, "lang2id"): inputs.update( {"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)} ) if "masked" in args.model_type: inputs["threshold"] = args.final_threshold if args.global_topk: if threshold_mem is None: concat = torch.cat( [param.view(-1) for name, param in model.named_parameters() if "mask_scores" in name] ) n = concat.numel() kth = max(n - (int(n * args.final_threshold) + 1), 1) threshold_mem = concat.kthvalue(kth).values.item() inputs["threshold"] = threshold_mem outputs = model(**inputs) for i, example_index in enumerate(example_indices): eval_feature = features[example_index.item()] unique_id = int(eval_feature.unique_id) output = [to_list(output[i]) for output in outputs] # Some models (XLNet, XLM) use 5 arguments for their predictions, while the other "simpler" # models only use two. if len(output) >= 5: start_logits = output[0] start_top_index = output[1] end_logits = output[2] end_top_index = output[3] cls_logits = output[4] result = SquadResult( unique_id, start_logits, end_logits, start_top_index=start_top_index, end_top_index=end_top_index, cls_logits=cls_logits, ) else: start_logits, end_logits = output result = SquadResult(unique_id, start_logits, end_logits) all_results.append(result) evalTime = timeit.default_timer() - start_time logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset)) # Compute predictions output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix)) output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix)) if args.version_2_with_negative: output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix)) else: output_null_log_odds_file = None # XLNet and XLM use a more complex post-processing procedure if args.model_type in ["xlnet", "xlm"]: start_n_top = model.config.start_n_top if hasattr(model, "config") else model.module.config.start_n_top end_n_top = model.config.end_n_top if hasattr(model, "config") else model.module.config.end_n_top predictions = compute_predictions_log_probs( examples, features, all_results, args.n_best_size, args.max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, start_n_top, end_n_top, args.version_2_with_negative, tokenizer, args.verbose_logging, ) else: predictions = compute_predictions_logits( examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.verbose_logging, args.version_2_with_negative, args.null_score_diff_threshold, tokenizer, ) # 
Compute the F1 and exact scores. results = squad_evaluate(examples, predictions) return results def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): if args.local_rank not in [-1, 0] and not evaluate: # Make sure only the first process in distributed training process the dataset, and the others will use the cache torch.distributed.barrier() # Load data features from cache or dataset file input_dir = args.data_dir if args.data_dir else "." cached_features_file = os.path.join( input_dir, "cached_{}_{}_{}_{}".format( "dev" if evaluate else "train", args.tokenizer_name if args.tokenizer_name else list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length), list(filter(None, args.predict_file.split("/"))).pop() if evaluate else list(filter(None, args.train_file.split("/"))).pop(), ), ) # Init features and dataset from cache if it exists if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features_and_dataset = torch.load(cached_features_file) features, dataset, examples = ( features_and_dataset["features"], features_and_dataset["dataset"], features_and_dataset["examples"], ) else: logger.info("Creating features from dataset file at %s", input_dir) if not args.data_dir and ((evaluate and not args.predict_file) or (not evaluate and not args.train_file)): try: import tensorflow_datasets as tfds except ImportError: raise ImportError("If not data_dir is specified, tensorflow_datasets needs to be installed.") if args.version_2_with_negative: logger.warning("tensorflow_datasets does not handle version 2 of SQuAD.") tfds_examples = tfds.load("squad") examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate) else: processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor() if evaluate: examples = processor.get_dev_examples(args.data_dir, filename=args.predict_file) else: examples = processor.get_train_examples(args.data_dir, filename=args.train_file) features, dataset = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=not evaluate, return_dataset="pt", threads=args.threads, ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save({"features": features, "dataset": dataset, "examples": examples}, cached_features_file) if args.local_rank == 0 and not evaluate: # Make sure only the first process in distributed training process the dataset, and the others will use the cache torch.distributed.barrier() if output_examples: return dataset, examples, features return dataset def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()), ) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.", ) # Other parameters parser.add_argument( "--data_dir", default=None, type=str, help="The input data dir. Should contain the .json files for the task." 
+ "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--train_file", default=None, type=str, help="The input training file. If a data dir is specified, will look for the file there" + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--predict_file", default=None, type=str, help="The input evaluation file. If a data dir is specified, will look for the file there" + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co", ) parser.add_argument( "--version_2_with_negative", action="store_true", help="If true, the SQuAD examples contain some that do not have an answer.", ) parser.add_argument( "--null_score_diff_threshold", type=float, default=0.0, help="If null_score - best_non_null is greater than the threshold predict null.", ) parser.add_argument( "--max_seq_length", default=384, type=int, help=( "The maximum total input sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded." ), ) parser.add_argument( "--doc_stride", default=128, type=int, help="When splitting up a long document into chunks, how much stride to take between chunks.", ) parser.add_argument( "--max_query_length", default=64, type=int, help=( "The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length." ), ) parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") parser.add_argument( "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step." ) parser.add_argument( "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model." ) parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument( "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation." ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") # Pruning parameters parser.add_argument( "--mask_scores_learning_rate", default=1e-2, type=float, help="The Adam initial learning rate of the mask scores.", ) parser.add_argument( "--initial_threshold", default=1.0, type=float, help="Initial value of the threshold (for scheduling)." ) parser.add_argument( "--final_threshold", default=0.7, type=float, help="Final value of the threshold (for scheduling)." ) parser.add_argument( "--initial_warmup", default=1, type=int, help=( "Run `initial_warmup` * `warmup_steps` steps of threshold warmup during which threshold stays" "at its `initial_threshold` value (sparsity schedule)." ), ) parser.add_argument( "--final_warmup", default=2, type=int, help=( "Run `final_warmup` * `warmup_steps` steps of threshold cool-down during which threshold stays" "at its final_threshold value (sparsity schedule)." 
), ) parser.add_argument( "--pruning_method", default="topK", type=str, help=( "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning," " sigmoied_threshold = Soft movement pruning)." ), ) parser.add_argument( "--mask_init", default="constant", type=str, help="Initialization method for the mask scores. Choices: constant, uniform, kaiming.", ) parser.add_argument( "--mask_scale", default=0.0, type=float, help="Initialization parameter for the chosen initialization method." ) parser.add_argument("--regularization", default=None, help="Add L0 or L1 regularization to the mask scores.") parser.add_argument( "--final_lambda", default=0.0, type=float, help="Regularization intensity (used in conjunction with `regularization`.", ) parser.add_argument("--global_topk", action="store_true", help="Global TopK on the Scores.") parser.add_argument( "--global_topk_frequency_compute", default=25, type=int, help="Frequency at which we compute the TopK global threshold.", ) # Distillation parameters (optional) parser.add_argument( "--teacher_type", default=None, type=str, help=( "Teacher type. Teacher tokenizer and student (model) tokenizer must output the same tokenization. Only for" " distillation." ), ) parser.add_argument( "--teacher_name_or_path", default=None, type=str, help="Path to the already SQuAD fine-tuned teacher model. Only for distillation.", ) parser.add_argument( "--alpha_ce", default=0.5, type=float, help="Cross entropy loss linear weight. Only for distillation." ) parser.add_argument( "--alpha_distil", default=0.5, type=float, help="Distillation loss linear weight. Only for distillation." ) parser.add_argument( "--temperature", default=2.0, type=float, help="Distillation temperature. Only for distillation." ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument( "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.", ) parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.", ) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument( "--n_best_size", default=20, type=int, help="The total number of n-best predictions to generate in the nbest_predictions.json output file.", ) parser.add_argument( "--max_answer_length", default=30, type=int, help=( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ), ) parser.add_argument( "--verbose_logging", action="store_true", help=( "If true, all of the warnings related to data processing will be printed. " "A number of warnings are expected for a normal SQuAD evaluation." 
), ) parser.add_argument( "--lang_id", default=0, type=int, help=( "language id of input for language-specific xlm models (see" " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)" ), ) parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.") parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.") parser.add_argument( "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available") parser.add_argument( "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory" ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html" ), ) parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.") parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.") parser.add_argument("--threads", type=int, default=1, help="multiple threads for converting example to features") args = parser.parse_args() # Regularization if args.regularization == "null": args.regularization = None if args.doc_stride >= args.max_seq_length - args.max_query_length: logger.warning( "WARNING - You've set a doc stride which may be superior to the document length in some " "examples. This could result in errors when building features from the examples. Please reduce the doc " "stride or increase the maximum length to ensure the features are correctly built." ) if ( os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir ): raise ValueError( "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( args.output_dir ) ) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: # Make sure only the first process in distributed training will download model & vocab torch.distributed.barrier() args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None, pruning_method=args.pruning_method, mask_init=args.mask_init, mask_scale=args.mask_scale, ) tokenizer = tokenizer_class.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None, ) model = model_class.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None, ) if args.teacher_type is not None: assert args.teacher_name_or_path is not None assert args.alpha_distil > 0.0 assert args.alpha_distil + args.alpha_ce > 0.0 teacher_config_class, teacher_model_class, _ = MODEL_CLASSES[args.teacher_type] teacher_config = teacher_config_class.from_pretrained(args.teacher_name_or_path) teacher = teacher_model_class.from_pretrained( args.teacher_name_or_path, from_tf=False, config=teacher_config, cache_dir=args.cache_dir if args.cache_dir else None, ) teacher.to(args.device) else: teacher = None if args.local_rank == 0: # Make sure only the first process in distributed training will download model & vocab torch.distributed.barrier() model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set. # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will # remove the need for this code, but it is still valid. 
if args.fp16: try: import apex apex.amp.register_half_function(torch, "einsum") except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") # Training if args.do_train: train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer, teacher=teacher) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Save the trained model and the tokenizer if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` # Take care of distributed/parallel training model_to_save = model.module if hasattr(model, "module") else model model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) # , force_download=True) tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) model.to(args.device) # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory results = {} if args.do_eval and args.local_rank in [-1, 0]: if args.do_train: logger.info("Loading checkpoints saved during training for evaluation") checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = [ os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) ] else: logger.info("Loading checkpoint %s for evaluation", args.model_name_or_path) checkpoints = [args.model_name_or_path] logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: # Reload the model global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" model = model_class.from_pretrained(checkpoint) # , force_download=True) model.to(args.device) # Evaluate result = evaluate(args, model, tokenizer, prefix=global_step) result = {k + ("_{}".format(global_step) if global_step else ""): v for k, v in result.items()} results.update(result) logger.info("Results: {}".format(results)) predict_file = list(filter(None, args.predict_file.split("/"))).pop() if not os.path.exists(os.path.join(args.output_dir, predict_file)): os.makedirs(os.path.join(args.output_dir, predict_file)) output_eval_file = os.path.join(args.output_dir, predict_file, "eval_results.txt") with open(output_eval_file, "w") as writer: for key in sorted(results.keys()): writer.write("%s = %s\n" % (key, str(results[key]))) return results if __name__ == "__main__": main()
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-pruning Masked BERT for question-answering on SQuAD.""" import argparse import glob import logging import os import random import timeit import numpy as np import torch from emmental import MaskedBertConfig, MaskedBertForQuestionAnswering from torch import nn from torch.utils.data import DataLoader, RandomSampler, SequentialSampler from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange from transformers import ( WEIGHTS_NAME, AdamW, BertConfig, BertForQuestionAnswering, BertTokenizer, get_linear_schedule_with_warmup, squad_convert_examples_to_features, ) from transformers.data.metrics.squad_metrics import ( compute_predictions_log_probs, compute_predictions_logits, squad_evaluate, ) from transformers.data.processors.squad import SquadResult, SquadV1Processor, SquadV2Processor try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter logger = logging.getLogger(__name__) MODEL_CLASSES = { "bert": (BertConfig, BertForQuestionAnswering, BertTokenizer), "masked_bert": (MaskedBertConfig, MaskedBertForQuestionAnswering, BertTokenizer), } def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def schedule_threshold( step: int, total_step: int, warmup_steps: int, initial_threshold: float, final_threshold: float, initial_warmup: int, final_warmup: int, final_lambda: float, ): if step <= initial_warmup * warmup_steps: threshold = initial_threshold elif step > (total_step - final_warmup * warmup_steps): threshold = final_threshold else: spars_warmup_steps = initial_warmup * warmup_steps spars_schedu_steps = (final_warmup + initial_warmup) * warmup_steps mul_coeff = 1 - (step - spars_warmup_steps) / (total_step - spars_schedu_steps) threshold = final_threshold + (initial_threshold - final_threshold) * (mul_coeff**3) regu_lambda = final_lambda * threshold / final_threshold return threshold, regu_lambda def regularization(model: nn.Module, mode: str): regu, counter = 0, 0 for name, param in model.named_parameters(): if "mask_scores" in name: if mode == "l1": regu += torch.norm(torch.sigmoid(param), p=1) / param.numel() elif mode == "l0": regu += torch.sigmoid(param - 2 / 3 * np.log(0.1 / 1.1)).sum() / param.numel() else: ValueError("Don't know this mode.") counter += 1 return regu / counter def to_list(tensor): return tensor.detach().cpu().tolist() def train(args, train_dataset, model, tokenizer, teacher=None): """Train the model""" if args.local_rank in [-1, 0]: tb_writer = SummaryWriter(log_dir=args.output_dir) args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = 
DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if "mask_score" in n and p.requires_grad], "lr": args.mask_scores_learning_rate, }, { "params": [ p for n, p in model.named_parameters() if "mask_score" not in n and p.requires_grad and not any(nd in n for nd in no_decay) ], "lr": args.learning_rate, "weight_decay": args.weight_decay, }, { "params": [ p for n, p in model.named_parameters() if "mask_score" not in n and p.requires_grad and any(nd in n for nd in no_decay) ], "lr": args.learning_rate, "weight_decay": 0.0, }, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) # Check if saved optimizer or scheduler states exist if os.path.isfile(os.path.join(args.model_name_or_path, "optimizer.pt")) and os.path.isfile( os.path.join(args.model_name_or_path, "scheduler.pt") ): # Load in optimizer and scheduler states optimizer.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "optimizer.pt"))) scheduler.load_state_dict(torch.load(os.path.join(args.model_name_or_path, "scheduler.pt"))) if args.fp16: try: from apex import amp except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True, ) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. 
parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) # Distillation if teacher is not None: logger.info(" Training with distillation") global_step = 1 # Global TopK if args.global_topk: threshold_mem = None epochs_trained = 0 steps_trained_in_current_epoch = 0 # Check if continuing training from a checkpoint if os.path.exists(args.model_name_or_path): # set global_step to global_step of last saved checkpoint from model path try: checkpoint_suffix = args.model_name_or_path.split("-")[-1].split("/")[0] global_step = int(checkpoint_suffix) epochs_trained = global_step // (len(train_dataloader) // args.gradient_accumulation_steps) steps_trained_in_current_epoch = global_step % (len(train_dataloader) // args.gradient_accumulation_steps) logger.info(" Continuing training from checkpoint, will skip to saved global_step") logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", global_step) logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) except ValueError: logger.info(" Starting fine-tuning.") tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange( epochs_trained, int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0] ) # Added here for reproducibility set_seed(args) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue model.train() batch = tuple(t.to(args.device) for t in batch) threshold, regu_lambda = schedule_threshold( step=global_step, total_step=t_total, warmup_steps=args.warmup_steps, final_threshold=args.final_threshold, initial_threshold=args.initial_threshold, final_warmup=args.final_warmup, initial_warmup=args.initial_warmup, final_lambda=args.final_lambda, ) # Global TopK if args.global_topk: if threshold == 1.0: threshold = -1e2 # Or an indefinitely low quantity else: if (threshold_mem is None) or (global_step % args.global_topk_frequency_compute == 0): # Sort all the values to get the global topK concat = torch.cat( [param.view(-1) for name, param in model.named_parameters() if "mask_scores" in name] ) n = concat.numel() kth = max(n - (int(n * threshold) + 1), 1) threshold_mem = concat.kthvalue(kth).values.item() threshold = threshold_mem else: threshold = threshold_mem inputs = { "input_ids": batch[0], "attention_mask": batch[1], "token_type_ids": batch[2], "start_positions": batch[3], "end_positions": batch[4], } if args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": batch[5], "p_mask": batch[6]}) if args.version_2_with_negative: inputs.update({"is_impossible": batch[7]}) if hasattr(model, "config") and hasattr(model.config, "lang2id"): inputs.update( {"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)} ) if "masked" in args.model_type: inputs["threshold"] = threshold outputs = model(**inputs) # model outputs are always tuple in transformers (see doc) loss, 
start_logits_stu, end_logits_stu = outputs # Distillation loss if teacher is not None: with torch.no_grad(): start_logits_tea, end_logits_tea = teacher( input_ids=inputs["input_ids"], token_type_ids=inputs["token_type_ids"], attention_mask=inputs["attention_mask"], ) loss_start = nn.functional.kl_div( input=nn.functional.log_softmax(start_logits_stu / args.temperature, dim=-1), target=nn.functional.softmax(start_logits_tea / args.temperature, dim=-1), reduction="batchmean", ) * (args.temperature**2) loss_end = nn.functional.kl_div( input=nn.functional.log_softmax(end_logits_stu / args.temperature, dim=-1), target=nn.functional.softmax(end_logits_tea / args.temperature, dim=-1), reduction="batchmean", ) * (args.temperature**2) loss_logits = (loss_start + loss_end) / 2.0 loss = args.alpha_distil * loss_logits + args.alpha_ce * loss # Regularization if args.regularization is not None: regu_ = regularization(model=model, mode=args.regularization) loss = loss + regu_lambda * regu_ if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: tb_writer.add_scalar("threshold", threshold, global_step) for name, param in model.named_parameters(): if not param.requires_grad: continue tb_writer.add_scalar("parameter_mean/" + name, param.data.mean(), global_step) tb_writer.add_scalar("parameter_std/" + name, param.data.std(), global_step) tb_writer.add_scalar("parameter_min/" + name, param.data.min(), global_step) tb_writer.add_scalar("parameter_max/" + name, param.data.max(), global_step) if "pooler" in name: continue tb_writer.add_scalar("grad_mean/" + name, param.grad.data.mean(), global_step) tb_writer.add_scalar("grad_std/" + name, param.grad.data.std(), global_step) if args.regularization is not None and "mask_scores" in name: if args.regularization == "l1": perc = (torch.sigmoid(param) > threshold).sum().item() / param.numel() elif args.regularization == "l0": perc = (torch.sigmoid(param - 2 / 3 * np.log(0.1 / 1.1))).sum().item() / param.numel() tb_writer.add_scalar("retained_weights_perc/" + name, perc, global_step) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 # Log metrics if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Only evaluate when single GPU otherwise metrics may not average well if args.local_rank == -1 and args.evaluate_during_training: results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar("eval_{}".format(key), value, global_step) learning_rate_scalar = scheduler.get_lr() tb_writer.add_scalar("lr", learning_rate_scalar[0], global_step) if len(learning_rate_scalar) > 1: for idx, lr in enumerate(learning_rate_scalar[1:]): tb_writer.add_scalar(f"lr/{idx+1}", lr, global_step) tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step) if teacher is not None: tb_writer.add_scalar("loss/distil", loss_logits.item(), global_step) if args.regularization is 
not None: tb_writer.add_scalar("loss/regularization", regu_.item(), global_step) if (teacher is not None) or (args.regularization is not None): if (teacher is not None) and (args.regularization is not None): tb_writer.add_scalar( "loss/instant_ce", (loss.item() - regu_lambda * regu_.item() - args.alpha_distil * loss_logits.item()) / args.alpha_ce, global_step, ) elif teacher is not None: tb_writer.add_scalar( "loss/instant_ce", (loss.item() - args.alpha_distil * loss_logits.item()) / args.alpha_ce, global_step, ) else: tb_writer.add_scalar( "loss/instant_ce", loss.item() - regu_lambda * regu_.item(), global_step ) logging_loss = tr_loss # Save model checkpoint if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) # Take care of distributed/parallel training model_to_save = model.module if hasattr(model, "module") else model model_to_save.save_pretrained(output_dir) tokenizer.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) torch.save(scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) logger.info("Saving optimizer and scheduler states to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix=""): dataset, examples, features = load_and_cache_examples(args, tokenizer, evaluate=True, output_examples=True) if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(dataset) eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu eval if args.n_gpu > 1 and not isinstance(model, nn.DataParallel): model = nn.DataParallel(model) # Eval! 
logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(dataset)) logger.info(" Batch size = %d", args.eval_batch_size) all_results = [] start_time = timeit.default_timer() # Global TopK if args.global_topk: threshold_mem = None for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = { "input_ids": batch[0], "attention_mask": batch[1], "token_type_ids": batch[2], } if args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] example_indices = batch[3] # XLNet and XLM use more arguments for their predictions if args.model_type in ["xlnet", "xlm"]: inputs.update({"cls_index": batch[4], "p_mask": batch[5]}) # for lang_id-sensitive xlm models if hasattr(model, "config") and hasattr(model.config, "lang2id"): inputs.update( {"langs": (torch.ones(batch[0].shape, dtype=torch.int64) * args.lang_id).to(args.device)} ) if "masked" in args.model_type: inputs["threshold"] = args.final_threshold if args.global_topk: if threshold_mem is None: concat = torch.cat( [param.view(-1) for name, param in model.named_parameters() if "mask_scores" in name] ) n = concat.numel() kth = max(n - (int(n * args.final_threshold) + 1), 1) threshold_mem = concat.kthvalue(kth).values.item() inputs["threshold"] = threshold_mem outputs = model(**inputs) for i, example_index in enumerate(example_indices): eval_feature = features[example_index.item()] unique_id = int(eval_feature.unique_id) output = [to_list(output[i]) for output in outputs] # Some models (XLNet, XLM) use 5 arguments for their predictions, while the other "simpler" # models only use two. if len(output) >= 5: start_logits = output[0] start_top_index = output[1] end_logits = output[2] end_top_index = output[3] cls_logits = output[4] result = SquadResult( unique_id, start_logits, end_logits, start_top_index=start_top_index, end_top_index=end_top_index, cls_logits=cls_logits, ) else: start_logits, end_logits = output result = SquadResult(unique_id, start_logits, end_logits) all_results.append(result) evalTime = timeit.default_timer() - start_time logger.info(" Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(dataset)) # Compute predictions output_prediction_file = os.path.join(args.output_dir, "predictions_{}.json".format(prefix)) output_nbest_file = os.path.join(args.output_dir, "nbest_predictions_{}.json".format(prefix)) if args.version_2_with_negative: output_null_log_odds_file = os.path.join(args.output_dir, "null_odds_{}.json".format(prefix)) else: output_null_log_odds_file = None # XLNet and XLM use a more complex post-processing procedure if args.model_type in ["xlnet", "xlm"]: start_n_top = model.config.start_n_top if hasattr(model, "config") else model.module.config.start_n_top end_n_top = model.config.end_n_top if hasattr(model, "config") else model.module.config.end_n_top predictions = compute_predictions_log_probs( examples, features, all_results, args.n_best_size, args.max_answer_length, output_prediction_file, output_nbest_file, output_null_log_odds_file, start_n_top, end_n_top, args.version_2_with_negative, tokenizer, args.verbose_logging, ) else: predictions = compute_predictions_logits( examples, features, all_results, args.n_best_size, args.max_answer_length, args.do_lower_case, output_prediction_file, output_nbest_file, output_null_log_odds_file, args.verbose_logging, args.version_2_with_negative, args.null_score_diff_threshold, tokenizer, ) # 
Compute the F1 and exact scores. results = squad_evaluate(examples, predictions) return results def load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False): if args.local_rank not in [-1, 0] and not evaluate: # Make sure only the first process in distributed training process the dataset, and the others will use the cache torch.distributed.barrier() # Load data features from cache or dataset file input_dir = args.data_dir if args.data_dir else "." cached_features_file = os.path.join( input_dir, "cached_{}_{}_{}_{}".format( "dev" if evaluate else "train", args.tokenizer_name if args.tokenizer_name else list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length), list(filter(None, args.predict_file.split("/"))).pop() if evaluate else list(filter(None, args.train_file.split("/"))).pop(), ), ) # Init features and dataset from cache if it exists if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features_and_dataset = torch.load(cached_features_file) features, dataset, examples = ( features_and_dataset["features"], features_and_dataset["dataset"], features_and_dataset["examples"], ) else: logger.info("Creating features from dataset file at %s", input_dir) if not args.data_dir and ((evaluate and not args.predict_file) or (not evaluate and not args.train_file)): try: import tensorflow_datasets as tfds except ImportError: raise ImportError("If not data_dir is specified, tensorflow_datasets needs to be installed.") if args.version_2_with_negative: logger.warning("tensorflow_datasets does not handle version 2 of SQuAD.") tfds_examples = tfds.load("squad") examples = SquadV1Processor().get_examples_from_dataset(tfds_examples, evaluate=evaluate) else: processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor() if evaluate: examples = processor.get_dev_examples(args.data_dir, filename=args.predict_file) else: examples = processor.get_train_examples(args.data_dir, filename=args.train_file) features, dataset = squad_convert_examples_to_features( examples=examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=not evaluate, return_dataset="pt", threads=args.threads, ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save({"features": features, "dataset": dataset, "examples": examples}, cached_features_file) if args.local_rank == 0 and not evaluate: # Make sure only the first process in distributed training process the dataset, and the others will use the cache torch.distributed.barrier() if output_examples: return dataset, examples, features return dataset def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()), ) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model checkpoints and predictions will be written.", ) # Other parameters parser.add_argument( "--data_dir", default=None, type=str, help="The input data dir. Should contain the .json files for the task." 
+ "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--train_file", default=None, type=str, help="The input training file. If a data dir is specified, will look for the file there" + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--predict_file", default=None, type=str, help="The input evaluation file. If a data dir is specified, will look for the file there" + "If no data dir or train/predict files are specified, will run with tensorflow_datasets.", ) parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co", ) parser.add_argument( "--version_2_with_negative", action="store_true", help="If true, the SQuAD examples contain some that do not have an answer.", ) parser.add_argument( "--null_score_diff_threshold", type=float, default=0.0, help="If null_score - best_non_null is greater than the threshold predict null.", ) parser.add_argument( "--max_seq_length", default=384, type=int, help=( "The maximum total input sequence length after WordPiece tokenization. Sequences " "longer than this will be truncated, and sequences shorter than this will be padded." ), ) parser.add_argument( "--doc_stride", default=128, type=int, help="When splitting up a long document into chunks, how much stride to take between chunks.", ) parser.add_argument( "--max_query_length", default=64, type=int, help=( "The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length." ), ) parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") parser.add_argument( "--evaluate_during_training", action="store_true", help="Run evaluation during training at each logging step." ) parser.add_argument( "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model." ) parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument( "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation." ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") # Pruning parameters parser.add_argument( "--mask_scores_learning_rate", default=1e-2, type=float, help="The Adam initial learning rate of the mask scores.", ) parser.add_argument( "--initial_threshold", default=1.0, type=float, help="Initial value of the threshold (for scheduling)." ) parser.add_argument( "--final_threshold", default=0.7, type=float, help="Final value of the threshold (for scheduling)." ) parser.add_argument( "--initial_warmup", default=1, type=int, help=( "Run `initial_warmup` * `warmup_steps` steps of threshold warmup during which threshold stays" "at its `initial_threshold` value (sparsity schedule)." ), ) parser.add_argument( "--final_warmup", default=2, type=int, help=( "Run `final_warmup` * `warmup_steps` steps of threshold cool-down during which threshold stays" "at its final_threshold value (sparsity schedule)." 
), ) parser.add_argument( "--pruning_method", default="topK", type=str, help=( "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning," " sigmoied_threshold = Soft movement pruning)." ), ) parser.add_argument( "--mask_init", default="constant", type=str, help="Initialization method for the mask scores. Choices: constant, uniform, kaiming.", ) parser.add_argument( "--mask_scale", default=0.0, type=float, help="Initialization parameter for the chosen initialization method." ) parser.add_argument("--regularization", default=None, help="Add L0 or L1 regularization to the mask scores.") parser.add_argument( "--final_lambda", default=0.0, type=float, help="Regularization intensity (used in conjunction with `regularization`.", ) parser.add_argument("--global_topk", action="store_true", help="Global TopK on the Scores.") parser.add_argument( "--global_topk_frequency_compute", default=25, type=int, help="Frequency at which we compute the TopK global threshold.", ) # Distillation parameters (optional) parser.add_argument( "--teacher_type", default=None, type=str, help=( "Teacher type. Teacher tokenizer and student (model) tokenizer must output the same tokenization. Only for" " distillation." ), ) parser.add_argument( "--teacher_name_or_path", default=None, type=str, help="Path to the already SQuAD fine-tuned teacher model. Only for distillation.", ) parser.add_argument( "--alpha_ce", default=0.5, type=float, help="Cross entropy loss linear weight. Only for distillation." ) parser.add_argument( "--alpha_distil", default=0.5, type=float, help="Distillation loss linear weight. Only for distillation." ) parser.add_argument( "--temperature", default=2.0, type=float, help="Distillation temperature. Only for distillation." ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument( "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.", ) parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. Override num_train_epochs.", ) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument( "--n_best_size", default=20, type=int, help="The total number of n-best predictions to generate in the nbest_predictions.json output file.", ) parser.add_argument( "--max_answer_length", default=30, type=int, help=( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ), ) parser.add_argument( "--verbose_logging", action="store_true", help=( "If true, all of the warnings related to data processing will be printed. " "A number of warnings are expected for a normal SQuAD evaluation." 
), ) parser.add_argument( "--lang_id", default=0, type=int, help=( "language id of input for language-specific xlm models (see" " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)" ), ) parser.add_argument("--logging_steps", type=int, default=500, help="Log every X updates steps.") parser.add_argument("--save_steps", type=int, default=500, help="Save checkpoint every X updates steps.") parser.add_argument( "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available") parser.add_argument( "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory" ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help=( "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html" ), ) parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.") parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.") parser.add_argument("--threads", type=int, default=1, help="multiple threads for converting example to features") args = parser.parse_args() # Regularization if args.regularization == "null": args.regularization = None if args.doc_stride >= args.max_seq_length - args.max_query_length: logger.warning( "WARNING - You've set a doc stride which may be superior to the document length in some " "examples. This could result in errors when building features from the examples. Please reduce the doc " "stride or increase the maximum length to ensure the features are correctly built." ) if ( os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir ): raise ValueError( "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( args.output_dir ) ) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count() else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set seed set_seed(args) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: # Make sure only the first process in distributed training will download model & vocab torch.distributed.barrier() args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, cache_dir=args.cache_dir if args.cache_dir else None, pruning_method=args.pruning_method, mask_init=args.mask_init, mask_scale=args.mask_scale, ) tokenizer = tokenizer_class.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None, ) model = model_class.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None, ) if args.teacher_type is not None: assert args.teacher_name_or_path is not None assert args.alpha_distil > 0.0 assert args.alpha_distil + args.alpha_ce > 0.0 teacher_config_class, teacher_model_class, _ = MODEL_CLASSES[args.teacher_type] teacher_config = teacher_config_class.from_pretrained(args.teacher_name_or_path) teacher = teacher_model_class.from_pretrained( args.teacher_name_or_path, from_tf=False, config=teacher_config, cache_dir=args.cache_dir if args.cache_dir else None, ) teacher.to(args.device) else: teacher = None if args.local_rank == 0: # Make sure only the first process in distributed training will download model & vocab torch.distributed.barrier() model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Before we do anything with models, we want to ensure that we get fp16 execution of torch.einsum if args.fp16 is set. # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. Note that running `--fp16_opt_level="O2"` will # remove the need for this code, but it is still valid. 
if args.fp16: try: import apex apex.amp.register_half_function(torch, "einsum") except ImportError: raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") # Training if args.do_train: train_dataset = load_and_cache_examples(args, tokenizer, evaluate=False, output_examples=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer, teacher=teacher) logger.info(" global_step = %s, average loss = %s", global_step, tr_loss) # Save the trained model and the tokenizer if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` # Take care of distributed/parallel training model_to_save = model.module if hasattr(model, "module") else model model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) # , force_download=True) tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) model.to(args.device) # Evaluation - we can ask to evaluate all the checkpoints (sub-directories) in a directory results = {} if args.do_eval and args.local_rank in [-1, 0]: if args.do_train: logger.info("Loading checkpoints saved during training for evaluation") checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = [ os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) ] else: logger.info("Loading checkpoint %s for evaluation", args.model_name_or_path) checkpoints = [args.model_name_or_path] logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: # Reload the model global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" model = model_class.from_pretrained(checkpoint) # , force_download=True) model.to(args.device) # Evaluate result = evaluate(args, model, tokenizer, prefix=global_step) result = {k + ("_{}".format(global_step) if global_step else ""): v for k, v in result.items()} results.update(result) logger.info("Results: {}".format(results)) predict_file = list(filter(None, args.predict_file.split("/"))).pop() if not os.path.exists(os.path.join(args.output_dir, predict_file)): os.makedirs(os.path.join(args.output_dir, predict_file)) output_eval_file = os.path.join(args.output_dir, predict_file, "eval_results.txt") with open(output_eval_file, "w") as writer: for key in sorted(results.keys()): writer.write("%s = %s\n" % (key, str(results[key]))) return results if __name__ == "__main__": main()
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div` instead.
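A minimal sketch of the replacement this description refers to (not taken from the PR diff; the tensor values and block size are made-up examples). Elementwise floor division of a tensor with `//` triggers the `__floordiv__` deprecation warning on recent PyTorch, while `torch.div(..., rounding_mode="floor")` produces the same result without it; the exact call sites changed by the PR may use a different form.

```python
import torch

positions = torch.arange(12)  # hypothetical indices, purely for illustration

# Deprecated pattern: emits "__floordiv__ is deprecated ..." on recent PyTorch versions
block_ids_old = positions // 4

# Replacement suggested by the description: explicit floor division via torch.div
block_ids_new = torch.div(positions, 4, rounding_mode="floor")

# Both yield tensor([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
assert torch.equal(block_ids_old, block_ids_new)
```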
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/utils/dummy_flax_objects.py
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class FlaxForcedBOSTokenLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxForcedEOSTokenLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGenerationMixin(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLogitsProcessorList(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLogitsWarper(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMinLengthLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxTemperatureLogitsWarper(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxTopKLogitsWarper(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxTopPLogitsWarper(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = None FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None FLAX_MODEL_FOR_MASKED_LM_MAPPING = None FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = None FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = None FLAX_MODEL_FOR_PRETRAINING_MAPPING = None FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = None FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = None FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = None FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = None FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = None FLAX_MODEL_MAPPING = None class 
FlaxAutoModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForImageClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForNextSentencePrediction(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForSeq2SeqLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForSpeechSeq2Seq(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForVision2Seq(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartDecoderPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBeitForImageClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBeitForMaskedImageModeling(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBeitModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBeitPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, 
**kwargs): requires_backends(self, ["flax"]) class FlaxBertForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForNextSentencePrediction(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotSmallForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotSmallModel(metaclass=DummyObject): _backends = 
["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotSmallPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPTextModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPTextPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPVisionModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPVisionPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class 
FlaxEncoderDecoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPT2LMHeadModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPT2Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPT2PreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTNeoForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTNeoModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTNeoPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTJForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTJModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTJPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLongT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLongT5Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLongT5PreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMarianModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMarianMTModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMarianPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMBartForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMBartForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMBartForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMBartModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMBartPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMT5EncoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMT5Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxOPTForCausalLM(metaclass=DummyObject): _backends = ["flax"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxOPTModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxOPTPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxPegasusForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxPegasusModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxPegasusPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["flax"]) class FlaxRoFormerForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxSpeechEncoderDecoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxT5EncoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxT5Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxT5PreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxVisionEncoderDecoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxVisionTextDualEncoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxViTForImageClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxViTModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxViTPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWav2Vec2ForCTC(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWav2Vec2ForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWav2Vec2Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWav2Vec2PreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWhisperForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWhisperModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWhisperPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXGLMForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXGLMModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, 
["flax"]) class FlaxXGLMPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None class FlaxXLMRobertaForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"])
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends class FlaxForcedBOSTokenLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxForcedEOSTokenLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGenerationMixin(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLogitsProcessorList(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLogitsWarper(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMinLengthLogitsProcessor(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxTemperatureLogitsWarper(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxTopKLogitsWarper(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxTopPLogitsWarper(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAlbertPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) FLAX_MODEL_FOR_CAUSAL_LM_MAPPING = None FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING = None FLAX_MODEL_FOR_MASKED_LM_MAPPING = None FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING = None FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING = None FLAX_MODEL_FOR_PRETRAINING_MAPPING = None FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING = None FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING = None FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING = None FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING = None FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING = None FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING = None FLAX_MODEL_MAPPING = None class 
FlaxAutoModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForImageClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForNextSentencePrediction(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForSeq2SeqLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForSpeechSeq2Seq(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxAutoModelForVision2Seq(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartDecoderPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBartPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBeitForImageClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBeitForMaskedImageModeling(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBeitModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBeitPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, 
**kwargs): requires_backends(self, ["flax"]) class FlaxBertForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForNextSentencePrediction(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBertPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBigBirdPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotSmallForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotSmallModel(metaclass=DummyObject): _backends = 
["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxBlenderbotSmallPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPTextModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPTextPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPVisionModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxCLIPVisionPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxDistilBertPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxElectraPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class 
FlaxEncoderDecoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPT2LMHeadModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPT2Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPT2PreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTNeoForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTNeoModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTNeoPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTJForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTJModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxGPTJPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLongT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLongT5Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxLongT5PreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMarianModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMarianMTModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMarianPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMBartForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMBartForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMBartForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMBartModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMBartPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMT5EncoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxMT5Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxOPTForCausalLM(metaclass=DummyObject): _backends = ["flax"] def 
__init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxOPTModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxOPTPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxPegasusForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxPegasusModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxPegasusPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRobertaPreLayerNormPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): 
requires_backends(self, ["flax"]) class FlaxRoFormerForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxRoFormerPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxSpeechEncoderDecoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxT5EncoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxT5ForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxT5Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxT5PreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxVisionEncoderDecoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxVisionTextDualEncoderModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxViTForImageClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxViTModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxViTPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWav2Vec2ForCTC(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWav2Vec2ForPreTraining(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWav2Vec2Model(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWav2Vec2PreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWhisperForConditionalGeneration(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWhisperModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxWhisperPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXGLMForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXGLMModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, 
["flax"]) class FlaxXGLMPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST = None class FlaxXLMRobertaForCausalLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForMaskedLM(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForMultipleChoice(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForQuestionAnswering(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForSequenceClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaForTokenClassification(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"]) class FlaxXLMRobertaPreTrainedModel(metaclass=DummyObject): _backends = ["flax"] def __init__(self, *args, **kwargs): requires_backends(self, ["flax"])
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div` instead.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/xlm_prophetnet/test_tokenization_xlm_prophetnet.py
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team, The Microsoft Research team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = XLMProphetNetTokenizer test_rust_tokenizer = False test_sentencepiece = True def setUp(self): super().setUp() # We have a SentencePiece fixture for testing tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True) tokenizer.save_pretrained(self.tmpdirname) def test_convert_token_and_id(self): """Test ``_convert_token_to_id`` and ``_convert_id_to_token``.""" token = "[PAD]" token_id = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "[PAD]") self.assertEqual(vocab_keys[1], "[CLS]") self.assertEqual(vocab_keys[-1], "j") self.assertEqual(len(vocab_keys), 1_012) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 1_012) def test_full_tokenizer(self): tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "[UNK]", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "[UNK]", ".", ], ) @cached_property def big_tokenizer(self): return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased") @slow def test_tokenization_base_easy_symbols(self): symbols 
= "Hello World!" original_tokenizer_encodings = [35389, 6672, 49, 2] self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols)) @slow def test_tokenizer_integration(self): # fmt: off expected_encoding = {'input_ids': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="microsoft/xprophetnet-large-wiki100-cased", revision="1acad1643ddd54a44df6a1b797ada8373685d90e", )
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team, The Microsoft Research team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") @require_sentencepiece class XLMProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = XLMProphetNetTokenizer test_rust_tokenizer = False test_sentencepiece = True def setUp(self): super().setUp() # We have a SentencePiece fixture for testing tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True) tokenizer.save_pretrained(self.tmpdirname) def test_convert_token_and_id(self): """Test ``_convert_token_to_id`` and ``_convert_id_to_token``.""" token = "[PAD]" token_id = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id) self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token) def test_get_vocab(self): vocab_keys = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0], "[PAD]") self.assertEqual(vocab_keys[1], "[CLS]") self.assertEqual(vocab_keys[-1], "j") self.assertEqual(len(vocab_keys), 1_012) def test_vocab_size(self): self.assertEqual(self.get_tokenizer().vocab_size, 1_012) def test_full_tokenizer(self): tokenizer = XLMProphetNetTokenizer(SAMPLE_VOCAB, keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [ value + tokenizer.fairseq_offset for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4] ], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "[UNK]", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "[UNK]", ".", ], ) @cached_property def big_tokenizer(self): return XLMProphetNetTokenizer.from_pretrained("microsoft/xprophetnet-large-wiki100-cased") @slow def test_tokenization_base_easy_symbols(self): symbols 
= "Hello World!" original_tokenizer_encodings = [35389, 6672, 49, 2] self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols)) @slow def test_tokenizer_integration(self): # fmt: off expected_encoding = {'input_ids': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=expected_encoding, model_name="microsoft/xprophetnet-large-wiki100-cased", revision="1acad1643ddd54a44df6a1b797ada8373685d90e", )
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/transfo_xl/test_tokenization_transfo_xl.py
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import unittest from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = TransfoXLTokenizer test_rust_tokenizer = False test_seq2seq = False def setUp(self): super().setUp() vocab_tokens = [ "<unk>", "[CLS]", "[SEP]", "want", "unwanted", "wa", "un", "running", ",", "low", "l", ] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def get_tokenizer(self, **kwargs): kwargs["lower_case"] = True return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self, tokenizer): input_text = "<unk> UNwanted , running" output_text = "<unk> unwanted, running" return input_text, output_text def test_full_tokenizer(self): tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True) tokens = tokenizer.tokenize("<unk> UNwanted , running") self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7]) def test_full_tokenizer_lower(self): tokenizer = TransfoXLTokenizer(lower_case=True) self.assertListEqual( tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"] ) def test_full_tokenizer_no_lower(self): tokenizer = TransfoXLTokenizer(lower_case=False) self.assertListEqual( tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def test_full_tokenizer_moses_numbers(self): tokenizer = TransfoXLTokenizer(lower_case=False) text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?" tokens_out = [ "Hello", "(", "bracket", ")", "and", "side", "@-@", "scrolled", "[", "and", "]", "Henry", "'s", "$", "5", "@,@", "000", "with", "3", "@.@", "34", "m", ".", "What", "'s", "up", "!", "?", ] self.assertListEqual(tokenizer.tokenize(text_in), tokens_out) self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in) def test_move_added_token(self): tokenizer = self.get_tokenizer() original_len = len(tokenizer) tokenizer.add_tokens(["new1", "new2"]) tokenizer.move_added_token("new1", 1) # Check that moved token is not copied (duplicate) self.assertEqual(len(tokenizer), original_len + 2) # Check that token is moved to specified id self.assertEqual(tokenizer.encode("new1"), [1]) self.assertEqual(tokenizer.decode([1]), "new1")
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import unittest from transformers.models.transfo_xl.tokenization_transfo_xl import VOCAB_FILES_NAMES, TransfoXLTokenizer from ...test_tokenization_common import TokenizerTesterMixin class TransfoXLTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = TransfoXLTokenizer test_rust_tokenizer = False test_seq2seq = False def setUp(self): super().setUp() vocab_tokens = [ "<unk>", "[CLS]", "[SEP]", "want", "unwanted", "wa", "un", "running", ",", "low", "l", ] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def get_tokenizer(self, **kwargs): kwargs["lower_case"] = True return TransfoXLTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self, tokenizer): input_text = "<unk> UNwanted , running" output_text = "<unk> unwanted, running" return input_text, output_text def test_full_tokenizer(self): tokenizer = TransfoXLTokenizer(vocab_file=self.vocab_file, lower_case=True) tokens = tokenizer.tokenize("<unk> UNwanted , running") self.assertListEqual(tokens, ["<unk>", "unwanted", ",", "running"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [0, 4, 8, 7]) def test_full_tokenizer_lower(self): tokenizer = TransfoXLTokenizer(lower_case=True) self.assertListEqual( tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["hello", "!", "how", "are", "you", "?"] ) def test_full_tokenizer_no_lower(self): tokenizer = TransfoXLTokenizer(lower_case=False) self.assertListEqual( tokenizer.tokenize(" \tHeLLo ! how \n Are yoU ? "), ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def test_full_tokenizer_moses_numbers(self): tokenizer = TransfoXLTokenizer(lower_case=False) text_in = "Hello (bracket) and side-scrolled [and] Henry's $5,000 with 3.34 m. What's up!?" tokens_out = [ "Hello", "(", "bracket", ")", "and", "side", "@-@", "scrolled", "[", "and", "]", "Henry", "'s", "$", "5", "@,@", "000", "with", "3", "@.@", "34", "m", ".", "What", "'s", "up", "!", "?", ] self.assertListEqual(tokenizer.tokenize(text_in), tokens_out) self.assertEqual(tokenizer.convert_tokens_to_string(tokens_out), text_in) def test_move_added_token(self): tokenizer = self.get_tokenizer() original_len = len(tokenizer) tokenizer.add_tokens(["new1", "new2"]) tokenizer.move_added_token("new1", 1) # Check that moved token is not copied (duplicate) self.assertEqual(len(tokenizer), original_len + 2) # Check that token is moved to specified id self.assertEqual(tokenizer.encode("new1"), [1]) self.assertEqual(tokenizer.decode([1]), "new1")
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/wavlm/modeling_wavlm.py
# coding=utf-8 # Copyright 2021 The Fairseq Authors, Microsoft Research, and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch WavLM model.""" import math import warnings from typing import Optional, Tuple, Union import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...deepspeed import is_deepspeed_zero3_enabled from ...modeling_outputs import ( BaseModelOutput, CausalLMOutput, SequenceClassifierOutput, TokenClassifierOutput, Wav2Vec2BaseModelOutput, XVectorOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import torch_int_div from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_wavlm import WavLMConfig logger = logging.get_logger(__name__) _HIDDEN_STATES_START_POSITION = 2 # General docstring _CONFIG_FOR_DOC = "WavLMConfig" # Base docstring _CHECKPOINT_FOR_DOC = "patrickvonplaten/wavlm-libri-clean-100h-base-plus" _EXPECTED_OUTPUT_SHAPE = [1, 292, 768] # CTC docstring _CTC_EXPECTED_OUTPUT = "'mister quilter is the aposle of the middle classes and we are glad to welcome his gospel'" _CTC_EXPECTED_LOSS = 12.51 # Frame class docstring _FRAME_CLASS_CHECKPOINT = "microsoft/wavlm-base-plus-sd" _FRAME_EXPECTED_OUTPUT = [0, 0] # Speaker Verification docstring _XVECTOR_CHECKPOINT = "microsoft/wavlm-base-plus-sv" _XVECTOR_EXPECTED_OUTPUT = 0.97 WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST = [ "microsoft/wavlm-base", "microsoft/wavlm-base-plus", "microsoft/wavlm-large", # See all WavLM models at https://huggingface.co/models?filter=wavlm ] # Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices def _compute_mask_indices( shape: Tuple[int, int], mask_prob: float, mask_length: int, attention_mask: Optional[torch.LongTensor] = None, min_masks: int = 0, ) -> np.ndarray: """ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on CPU as part of the preprocessing during training. Args: shape: The shape for which to compute masks. This should be of a tuple of size 2 where the first element is the batch size and the second element is the length of the axis to span. mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of independently generated mask spans of length `mask_length` is computed by `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the actual percentage will be smaller. mask_length: size of the mask min_masks: minimum number of masked spans attention_mask: A (right-padded) attention mask which independently shortens the feature axis of each batch dimension. 
""" batch_size, sequence_length = shape if mask_length < 1: raise ValueError("`mask_length` has to be bigger than 0.") if mask_length > sequence_length: raise ValueError( f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}" f" and `sequence_length`: {sequence_length}`" ) # epsilon is used for probabilistic rounding epsilon = np.random.rand(1).item() def compute_num_masked_span(input_length): """Given input length, compute how many spans should be masked""" num_masked_span = int(mask_prob * input_length / mask_length + epsilon) num_masked_span = max(num_masked_span, min_masks) # make sure num masked span <= sequence_length if num_masked_span * mask_length > sequence_length: num_masked_span = sequence_length // mask_length # make sure num_masked span is also <= input_length - (mask_length - 1) if input_length - (mask_length - 1) < num_masked_span: num_masked_span = max(input_length - (mask_length - 1), 0) return num_masked_span # compute number of masked spans in batch input_lengths = ( attention_mask.sum(-1).detach().tolist() if attention_mask is not None else [sequence_length for _ in range(batch_size)] ) # SpecAugment mask to fill spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool) spec_aug_mask_idxs = [] max_num_masked_span = compute_num_masked_span(sequence_length) if max_num_masked_span == 0: return spec_aug_mask for input_length in input_lengths: # compute num of masked spans for this input num_masked_span = compute_num_masked_span(input_length) # get random indices to mask spec_aug_mask_idx = np.random.choice( np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False ) # pick first sampled index that will serve as a dummy index to pad vector # to ensure same dimension for all batches due to probabilistic rounding # Picking first sample just pads those vectors twice. 
if len(spec_aug_mask_idx) == 0: # this case can only happen if `input_length` is strictly smaller then # `sequence_length` in which case the last token has to be a padding # token which we can use as a dummy mask id dummy_mask_idx = sequence_length - 1 else: dummy_mask_idx = spec_aug_mask_idx[0] spec_aug_mask_idx = np.concatenate( [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx] ) spec_aug_mask_idxs.append(spec_aug_mask_idx) spec_aug_mask_idxs = np.array(spec_aug_mask_idxs) # expand masked indices to masked spans spec_aug_mask_idxs = np.broadcast_to( spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length) ) spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length) # add offset to the starting indexes so that indexes now create a span offsets = np.arange(mask_length)[None, None, :] offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape( batch_size, max_num_masked_span * mask_length ) spec_aug_mask_idxs = spec_aug_mask_idxs + offsets # ensure that we cannot have indices larger than sequence_length if spec_aug_mask_idxs.max() > sequence_length - 1: spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1 # scatter indices to mask np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1) return spec_aug_mask # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->WavLM class WavLMNoLayerNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->WavLM class WavLMLayerNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = hidden_states.transpose(-2, -1) hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states.transpose(-2, -1) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->WavLM class WavLMGroupNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.activation = ACT2FN[config.feat_extract_activation] 
self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True) def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->WavLM class WavLMPositionalConvEmbedding(nn.Module): def __init__(self, config): super().__init__() self.conv = nn.Conv1d( config.hidden_size, config.hidden_size, kernel_size=config.num_conv_pos_embeddings, padding=config.num_conv_pos_embeddings // 2, groups=config.num_conv_pos_embedding_groups, ) if is_deepspeed_zero3_enabled(): import deepspeed with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0): self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2) deepspeed.zero.register_external_parameter(self, self.conv.weight_v) deepspeed.zero.register_external_parameter(self, self.conv.weight_g) else: self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2) self.padding = WavLMSamePadLayer(config.num_conv_pos_embeddings) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = hidden_states.transpose(1, 2) hidden_states = self.conv(hidden_states) hidden_states = self.padding(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = hidden_states.transpose(1, 2) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->WavLM class WavLMSamePadLayer(nn.Module): def __init__(self, num_conv_pos_embeddings): super().__init__() self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0 def forward(self, hidden_states): if self.num_pad_remove > 0: hidden_states = hidden_states[:, :, : -self.num_pad_remove] return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->WavLM class WavLMFeatureEncoder(nn.Module): """Construct the features from raw audio waveform""" def __init__(self, config): super().__init__() if config.feat_extract_norm == "group": conv_layers = [WavLMGroupNormConvLayer(config, layer_id=0)] + [ WavLMNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1) ] elif config.feat_extract_norm == "layer": conv_layers = [WavLMLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)] else: raise ValueError( f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']" ) self.conv_layers = nn.ModuleList(conv_layers) self.gradient_checkpointing = False self._requires_grad = True def _freeze_parameters(self): for param in self.parameters(): param.requires_grad = False self._requires_grad = False def forward(self, input_values): hidden_states = input_values[:, None] # make sure hidden_states require grad for gradient_checkpointing if self._requires_grad and self.training: hidden_states.requires_grad = True for conv_layer in self.conv_layers: if self._requires_grad and self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(conv_layer), hidden_states, ) else: hidden_states = conv_layer(hidden_states) return hidden_states class WavLMFeatureExtractor(WavLMFeatureEncoder): def __init__(self, config): 
super().__init__(config) warnings.warn( f"The class `{self.__class__.__name__}` has been depreciated " "and will be removed in Transformers v5. " f"Use `{self.__class__.__bases__[0].__name__}` instead.", FutureWarning, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureProjection with Wav2Vec2->WavLM class WavLMFeatureProjection(nn.Module): def __init__(self, config): super().__init__() self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps) self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size) self.dropout = nn.Dropout(config.feat_proj_dropout) def forward(self, hidden_states): # non-projected hidden states are needed for quantization norm_hidden_states = self.layer_norm(hidden_states) hidden_states = self.projection(norm_hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states, norm_hidden_states class WavLMAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, num_buckets: int = 320, max_distance: int = 800, has_relative_position_bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.k_proj = nn.Linear(embed_dim, embed_dim) self.v_proj = nn.Linear(embed_dim, embed_dim) self.q_proj = nn.Linear(embed_dim, embed_dim) self.out_proj = nn.Linear(embed_dim, embed_dim) self.num_buckets = num_buckets self.max_distance = max_distance self.gru_rel_pos_const = nn.Parameter(torch.ones(1, self.num_heads, 1, 1)) self.gru_rel_pos_linear = nn.Linear(self.head_dim, 8) if has_relative_position_bias: self.rel_attn_embed = nn.Embedding(self.num_buckets, self.num_heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_bias: Optional[torch.Tensor] = None, output_attentions: bool = False, index=0, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Attention layer with relative attention""" bsz, tgt_len, _ = hidden_states.size() # first pass of attention layer creates position bias if position_bias is None: position_bias = self.compute_bias(tgt_len, tgt_len) position_bias = ( position_bias.unsqueeze(0).repeat(bsz, 1, 1, 1).view(bsz * self.num_heads, tgt_len, tgt_len) ) # Compute relative position bias: # 1) get reshape hidden_states gated_hidden_states = hidden_states.view(hidden_states.shape[:-1] + (self.num_heads, -1)) gated_hidden_states = gated_hidden_states.permute(0, 2, 1, 3) # 2) project hidden states relative_position_proj = self.gru_rel_pos_linear(gated_hidden_states) relative_position_proj = relative_position_proj.view(gated_hidden_states.shape[:-1] + (2, 4)).sum(-1) # 3) compute gate for position bias from projected hidden states gate_a, gate_b = torch.sigmoid(relative_position_proj).chunk(2, dim=-1) gate_output = gate_a * (gate_b * self.gru_rel_pos_const - 1.0) + 2.0 # 4) apply gate to position bias to compute gated position_bias gated_position_bias = gate_output.view(bsz * self.num_heads, -1, 1) * position_bias gated_position_bias = gated_position_bias.view((-1, tgt_len, tgt_len)) attn_output, attn_weights = self.torch_multi_head_self_attention( hidden_states, attention_mask, gated_position_bias, 
output_attentions ) return attn_output, attn_weights, position_bias def torch_multi_head_self_attention( self, hidden_states: torch.FloatTensor, attention_mask: Union[torch.LongTensor, torch.BoolTensor], gated_position_bias: torch.FloatTensor, output_attentions: bool, ) -> (torch.FloatTensor, torch.FloatTensor): """simple wrapper around torch's multi_head_attention_forward function""" # self-attention assumes q = k = v query = key = value = hidden_states.transpose(0, 1) key_padding_mask = attention_mask.ne(1) if attention_mask is not None else None # disable bias and add_zero_attn bias_k = bias_v = None add_zero_attn = False # PyTorch 1.3.0 has F.multi_head_attention_forward defined # so no problem with backwards compatibility attn_output, attn_weights = F.multi_head_attention_forward( query, key, value, self.embed_dim, self.num_heads, torch.empty([0]), torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)), bias_k, bias_v, add_zero_attn, self.dropout, self.out_proj.weight, self.out_proj.bias, self.training, key_padding_mask, output_attentions, gated_position_bias, use_separate_proj_weight=True, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.weight, v_proj_weight=self.v_proj.weight, ) # [Seq_Len, Batch Size, ...] -> [Batch Size, Seq_Len, ...] attn_output = attn_output.transpose(0, 1) if attn_weights is not None: # IMPORTANT: Attention weights are averaged weights # here which should not be the case. This is an open issue # on PyTorch: https://github.com/pytorch/pytorch/issues/32590 attn_weights = attn_weights[:, None].broadcast_to( attn_weights.shape[:1] + (self.num_heads,) + attn_weights.shape[1:] ) return attn_output, attn_weights def compute_bias(self, query_length: int, key_length: int) -> torch.FloatTensor: context_position = torch.arange(query_length, dtype=torch.long)[:, None] memory_position = torch.arange(key_length, dtype=torch.long)[None, :] relative_position = memory_position - context_position relative_position_bucket = self._relative_positions_bucket(relative_position) relative_position_bucket = relative_position_bucket.to(self.rel_attn_embed.weight.device) values = self.rel_attn_embed(relative_position_bucket) values = values.permute([2, 0, 1]) return values def _relative_positions_bucket(self, relative_positions: torch.FloatTensor) -> torch.FloatTensor: num_buckets = self.num_buckets // 2 relative_buckets = (relative_positions > 0).to(torch.long) * num_buckets relative_positions = torch.abs(relative_positions) max_exact = num_buckets // 2 is_small = relative_positions < max_exact relative_positions_if_large = torch.log(relative_positions.float() / max_exact) relative_positions_if_large = relative_positions_if_large / math.log(self.max_distance / max_exact) relative_positions_if_large = relative_positions_if_large * (num_buckets - max_exact) relative_position_if_large = (max_exact + relative_positions_if_large).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_positions, relative_position_if_large) return relative_buckets # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->WavLM class WavLMFeedForward(nn.Module): def __init__(self, config): super().__init__() self.intermediate_dropout = nn.Dropout(config.activation_dropout) self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): 
self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size) self.output_dropout = nn.Dropout(config.hidden_dropout) def forward(self, hidden_states): hidden_states = self.intermediate_dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.intermediate_dropout(hidden_states) hidden_states = self.output_dense(hidden_states) hidden_states = self.output_dropout(hidden_states) return hidden_states class WavLMEncoderLayer(nn.Module): def __init__(self, config: WavLMConfig, has_relative_position_bias: bool = True): super().__init__() self.attention = WavLMAttention( embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, num_buckets=config.num_buckets, max_distance=config.max_bucket_distance, has_relative_position_bias=has_relative_position_bias, ) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = WavLMFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states, attention_mask=None, position_bias=None, output_attentions=False, index=0): attn_residual = hidden_states hidden_states, attn_weights, position_bias = self.attention( hidden_states, attention_mask=attention_mask, position_bias=position_bias, output_attentions=output_attentions, index=index, ) hidden_states = self.dropout(hidden_states) hidden_states = attn_residual + hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states + self.feed_forward(hidden_states) hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states, position_bias) if output_attentions: outputs += (attn_weights,) return outputs class WavLMEncoderLayerStableLayerNorm(nn.Module): def __init__(self, config: WavLMConfig, has_relative_position_bias: bool = True): super().__init__() self.attention = WavLMAttention( embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, num_buckets=config.num_buckets, max_distance=config.max_bucket_distance, has_relative_position_bias=has_relative_position_bias, ) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = WavLMFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states, attention_mask=None, position_bias=None, output_attentions=False): attn_residual = hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states, attn_weights, position_bias = self.attention( hidden_states, attention_mask=attention_mask, position_bias=position_bias, output_attentions=output_attentions, ) hidden_states = self.dropout(hidden_states) hidden_states = attn_residual + hidden_states hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states)) outputs = (hidden_states, position_bias) if output_attentions: outputs += (attn_weights,) return outputs class WavLMEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pos_conv_embed = WavLMPositionalConvEmbedding(config) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList( [WavLMEncoderLayer(config, 
has_relative_position_bias=(i == 0)) for i in range(config.num_hidden_layers)] ) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: # make sure padded tokens output 0 hidden_states[~attention_mask] = 0.0 position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled() position_bias = None for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = np.random.uniform(0, 1) skip_the_layer = self.training and i > 0 and (dropout_probability < self.config.layerdrop) if not skip_the_layer or deepspeed_zero3_is_enabled: # under deepspeed zero3 all gpus must run in sync if self.gradient_checkpointing and self.training: # create gradient checkpointing function def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer), hidden_states, attention_mask, position_bias, ) else: layer_outputs = layer( hidden_states, attention_mask=attention_mask, position_bias=position_bias, output_attentions=output_attentions, index=i, ) hidden_states, position_bias = layer_outputs[:2] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class WavLMEncoderStableLayerNorm(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pos_conv_embed = WavLMPositionalConvEmbedding(config) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList( [ WavLMEncoderLayerStableLayerNorm(config, has_relative_position_bias=(i == 0)) for i in range(config.num_hidden_layers) ] ) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: # make sure padded tokens are not attended to hidden_states[~attention_mask] = 0 position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.dropout(hidden_states) deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled() position_bias = None for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = np.random.uniform(0, 1) skip_the_layer = self.training and i > 0 and 
(dropout_probability < self.config.layerdrop) if not skip_the_layer or deepspeed_zero3_is_enabled: # under deepspeed zero3 all gpus must run in sync # XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication if self.gradient_checkpointing and self.training: # create gradient checkpointing function def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer), hidden_states, attention_mask, position_bias, ) else: layer_outputs = layer( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, position_bias=position_bias, ) hidden_states, position_bias = layer_outputs[:2] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[2],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions ) class WavLMGumbelVectorQuantizer(nn.Module): """ Vector quantization using gumbel softmax. See [CATEGORICAL REPARAMETERIZATION WITH GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information. """ def __init__(self, config): super().__init__() self.num_groups = config.num_codevector_groups self.num_vars = config.num_codevectors_per_group if config.codevector_dim % self.num_groups != 0: raise ValueError( f"`config.codevector_dim {config.codevector_dim} must be divisible" f" by `config.num_codevector_groups` {self.num_groups} " "for concatenation." 
) # storage for codebook variables (codewords) self.codevectors = nn.Parameter( torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups) ) self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars) # can be decayed for training self.temperature = 2 @staticmethod def _compute_perplexity(probs): marginal_probs = probs.mean(dim=0) perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum() return perplexity def forward(self, hidden_states): batch_size, sequence_length, hidden_size = hidden_states.shape # project to codevector dim hidden_states = self.weight_proj(hidden_states) hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1) if self.training: # sample code vector probs via gumbel in differentiateable way codevector_probs = nn.functional.gumbel_softmax(hidden_states.float(), tau=self.temperature, hard=True) codevector_probs = codevector_probs.type_as(hidden_states) # compute perplexity codevector_soft_dist = torch.softmax( hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1 ) perplexity = self._compute_perplexity(codevector_soft_dist) else: # take argmax in non-differentiable way # comptute hard codevector distribution (one hot) codevector_idx = hidden_states.argmax(dim=-1) codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_( -1, codevector_idx.view(-1, 1), 1.0 ) codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1) perplexity = self._compute_perplexity(codevector_probs) codevector_probs = codevector_probs.view(batch_size * sequence_length, -1) # use probs to retrieve codevectors codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1) codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1) return codevectors, perplexity # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Adapter with Wav2Vec2->WavLM class WavLMAdapter(nn.Module): def __init__(self, config): super().__init__() # feature dim might need to be down-projected if config.output_hidden_size != config.hidden_size: self.proj = nn.Linear(config.hidden_size, config.output_hidden_size) self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size) else: self.proj = self.proj_layer_norm = None self.layers = nn.ModuleList(WavLMAdapterLayer(config) for _ in range(config.num_adapter_layers)) self.layerdrop = config.layerdrop def forward(self, hidden_states): # down project hidden_states if necessary if self.proj is not None and self.proj_layer_norm is not None: hidden_states = self.proj(hidden_states) hidden_states = self.proj_layer_norm(hidden_states) hidden_states = hidden_states.transpose(1, 2) for layer in self.layers: layerdrop_prob = np.random.random() if not self.training or (layerdrop_prob > self.layerdrop): hidden_states = layer(hidden_states) hidden_states = hidden_states.transpose(1, 2) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2AdapterLayer with Wav2Vec2->WavLM class WavLMAdapterLayer(nn.Module): def __init__(self, config): super().__init__() self.conv = nn.Conv1d( config.output_hidden_size, 2 * config.output_hidden_size, config.adapter_kernel_size, stride=config.adapter_stride, padding=1, ) def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = 
nn.functional.glu(hidden_states, dim=1) return hidden_states class WavLMPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = WavLMConfig base_model_prefix = "wavlm" main_input_name = "input_values" _keys_to_ignore_on_load_missing = [r"position_ids"] supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" # gumbel softmax requires special init if isinstance(module, WavLMGumbelVectorQuantizer): module.weight_proj.weight.data.normal_(mean=0.0, std=1) module.weight_proj.bias.data.zero_() nn.init.uniform_(module.codevectors) elif isinstance(module, WavLMPositionalConvEmbedding): nn.init.normal_( module.conv.weight, mean=0, std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)), ) nn.init.constant_(module.conv.bias, 0) elif isinstance(module, WavLMFeatureProjection): k = math.sqrt(1 / module.projection.in_features) nn.init.uniform_(module.projection.weight, a=-k, b=k) nn.init.uniform_(module.projection.bias, a=-k, b=k) elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Conv1d): nn.init.kaiming_normal_(module.weight) if module.bias is not None: k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) nn.init.uniform_(module.bias, a=-k, b=k) def _get_feat_extract_output_lengths( self, input_lengths: Union[torch.LongTensor, int], add_adapter: Optional[bool] = None ): """ Computes the output length of the convolutional layers """ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return torch_int_div(input_length - kernel_size, stride) + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) if add_adapter: for _ in range(self.config.num_adapter_layers): input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride) return input_lengths def _get_feature_vector_attention_mask( self, feature_vector_length: int, attention_mask: torch.LongTensor, add_adapter=None ): # Effectively attention_mask.sum(-1), but not inplace to be able to run # on inference mode. 
non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1] output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter) output_lengths = output_lengths.to(torch.long) batch_size = attention_mask.shape[0] attention_mask = torch.zeros( (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device ) # these two operations makes sure that all values before the output lengths idxs are attended to attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1 attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool() return attention_mask def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (WavLMEncoder, WavLMEncoderStableLayerNorm, WavLMFeatureEncoder)): module.gradient_checkpointing = value WAVLM_START_DOCSTRING = r""" WavLM was proposed in [WavLM: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.). This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`WavLMConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ WAVLM_INPUTS_DOCSTRING = r""" Args: input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2Processor.__call__`] for details. attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) <Tip warning={true}> `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask == True`. For all models whose processor has `config.return_attention_mask == False`, `attention_mask` should **not** be passed to avoid degraded performance when doing batched inference. For such models `input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware that these models also yield slightly different results depending on whether `input_values` is padded or not. </Tip> output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare WavLM Model transformer outputting raw hidden-states without any specific head on top.", WAVLM_START_DOCSTRING, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM, WavLMBaseModelOutput->Wav2Vec2BaseModelOutput class WavLMModel(WavLMPreTrainedModel): def __init__(self, config: WavLMConfig): super().__init__(config) self.config = config self.feature_extractor = WavLMFeatureEncoder(config) self.feature_projection = WavLMFeatureProjection(config) # model only needs masking vector if mask prob is > 0.0 if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0: self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_()) if config.do_stable_layer_norm: self.encoder = WavLMEncoderStableLayerNorm(config) else: self.encoder = WavLMEncoder(config) self.adapter = WavLMAdapter(config) if config.add_adapter else None # Initialize weights and apply final processing self.post_init() def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.feature_extractor._freeze_parameters() def _mask_hidden_states( self, hidden_states: torch.FloatTensor, mask_time_indices: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, ): """ Masks extracted features along time axis and/or along feature axis according to [SpecAugment](https://arxiv.org/abs/1904.08779). 
""" # `config.apply_spec_augment` can set masking to False if not getattr(self.config, "apply_spec_augment", True): return hidden_states # generate indices & apply SpecAugment along time axis batch_size, sequence_length, hidden_size = hidden_states.size() if mask_time_indices is not None: # apply SpecAugment along time axis with given mask_time_indices hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) elif self.config.mask_time_prob > 0 and self.training: mask_time_indices = _compute_mask_indices( (batch_size, sequence_length), mask_prob=self.config.mask_time_prob, mask_length=self.config.mask_time_length, attention_mask=attention_mask, min_masks=self.config.mask_time_min_masks, ) mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool) hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) if self.config.mask_feature_prob > 0 and self.training: # generate indices & apply SpecAugment along feature axis mask_feature_indices = _compute_mask_indices( (batch_size, hidden_size), mask_prob=self.config.mask_feature_prob, mask_length=self.config.mask_feature_length, min_masks=self.config.mask_feature_min_masks, ) mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool) mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1) hidden_states[mask_feature_indices] = 0 return hidden_states @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Wav2Vec2BaseModelOutput, config_class=_CONFIG_FOR_DOC, modality="audio", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, mask_time_indices: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Wav2Vec2BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict extract_features = self.feature_extractor(input_values) extract_features = extract_features.transpose(1, 2) if attention_mask is not None: # compute reduced attention_mask corresponding to feature vectors attention_mask = self._get_feature_vector_attention_mask( extract_features.shape[1], attention_mask, add_adapter=False ) hidden_states, extract_features = self.feature_projection(extract_features) hidden_states = self._mask_hidden_states( hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask ) encoder_outputs = self.encoder( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = encoder_outputs[0] if self.adapter is not None: hidden_states = self.adapter(hidden_states) if not return_dict: return (hidden_states, extract_features) + encoder_outputs[1:] return Wav2Vec2BaseModelOutput( last_hidden_state=hidden_states, extract_features=extract_features, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings( """WavLM Model with a `language modeling` head on top for Connectionist 
Temporal Classification (CTC).""", WAVLM_START_DOCSTRING, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM class WavLMForCTC(WavLMPreTrainedModel): def __init__(self, config): super().__init__(config) self.wavlm = WavLMModel(config) self.dropout = nn.Dropout(config.final_dropout) if config.vocab_size is None: raise ValueError( f"You are trying to instantiate {self.__class__} with a configuration that " "does not define the vocabulary size of the language model head. Please " "instantiate the model as follows: `WavLMForCTC.from_pretrained(..., vocab_size=vocab_size)`. " "or define `vocab_size` of your model's configuration." ) output_hidden_size = ( config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size ) self.lm_head = nn.Linear(output_hidden_size, config.vocab_size) # Initialize weights and apply final processing self.post_init() def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.wavlm.feature_extractor._freeze_parameters() @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC, expected_output=_CTC_EXPECTED_OUTPUT, expected_loss=_CTC_EXPECTED_LOSS, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, CausalLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*): Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.wavlm( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states) logits = self.lm_head(hidden_states) loss = None if labels is not None: if labels.max() >= self.config.vocab_size: raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}") # retrieve loss input_lengths from attention_mask attention_mask = ( attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long) ) input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long) # assuming that padded tokens are filled with -100 # when not being attended to labels_mask = labels >= 0 target_lengths = labels_mask.sum(-1) flattened_targets = labels.masked_select(labels_mask) # ctc_loss doesn't support fp16 log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1) with torch.backends.cudnn.flags(enabled=False): loss = nn.functional.ctc_loss( log_probs, flattened_targets, input_lengths, target_lengths, blank=self.config.pad_token_id, reduction=self.config.ctc_loss_reduction, zero_infinity=self.config.ctc_zero_infinity, ) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return CausalLMOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions ) @add_start_docstrings( """ WavLM Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting. """, WAVLM_START_DOCSTRING, ) class WavLMForSequenceClassification(WavLMPreTrainedModel): def __init__(self, config): super().__init__(config) if hasattr(config, "add_adapter") and config.add_adapter: raise ValueError( "Sequence classification does not support the use of WavLM adapters (config.add_adapter=True)" ) self.wavlm = WavLMModel(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.freeze_feature_extractor def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.freeze_feature_encoder with wav2vec2->wavlm def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. 
""" self.wavlm.feature_extractor._freeze_parameters() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.freeze_base_model with wav2vec2->wavlm def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.wavlm.parameters(): param.requires_grad = False @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, modality="audio", ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.forward with Wav2Vec2->WavLM, wav2vec2->wavlm def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.wavlm( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) if attention_mask is None: pooled_output = hidden_states.mean(dim=1) else: padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask) hidden_states[~padding_mask] = 0.0 pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1) logits = self.classifier(pooled_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ WavLM Model with a frame classification head on top for tasks like Speaker Diarization. 
""", WAVLM_START_DOCSTRING, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForAudioFrameClassification with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM class WavLMForAudioFrameClassification(WavLMPreTrainedModel): def __init__(self, config): super().__init__(config) if hasattr(config, "add_adapter") and config.add_adapter: raise ValueError( "Audio frame classification does not support the use of WavLM adapters (config.add_adapter=True)" ) self.wavlm = WavLMModel(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.num_labels = config.num_labels self.init_weights() def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.wavlm.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.wavlm.parameters(): param.requires_grad = False @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_FRAME_CLASS_CHECKPOINT, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, modality="audio", expected_output=_FRAME_EXPECTED_OUTPUT, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.wavlm( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] logits = self.classifier(hidden_states) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), torch.argmax(labels.view(-1, self.num_labels), axis=1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.AMSoftmaxLoss class AMSoftmaxLoss(nn.Module): def __init__(self, input_dim, num_labels, scale=30.0, margin=0.4): super(AMSoftmaxLoss, self).__init__() self.scale = scale self.margin = margin self.num_labels = num_labels self.weight = nn.Parameter(torch.randn(input_dim, num_labels), requires_grad=True) self.loss = nn.CrossEntropyLoss() def forward(self, hidden_states, labels): labels = labels.flatten() weight = nn.functional.normalize(self.weight, dim=0) hidden_states = nn.functional.normalize(hidden_states, dim=1) cos_theta = torch.mm(hidden_states, weight) psi = cos_theta - self.margin onehot = nn.functional.one_hot(labels, self.num_labels) logits = self.scale * torch.where(onehot.bool(), psi, cos_theta) loss = self.loss(logits, labels) return loss # Copied from transformers.models.wav2vec2.modeling_wav2vec2.TDNNLayer class TDNNLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id] self.out_conv_dim = config.tdnn_dim[layer_id] self.kernel_size = config.tdnn_kernel[layer_id] self.dilation = config.tdnn_dilation[layer_id] self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim) self.activation = nn.ReLU() def forward(self, hidden_states): hidden_states = hidden_states.unsqueeze(1) hidden_states = nn.functional.unfold( hidden_states, (self.kernel_size, self.in_conv_dim), stride=(1, self.in_conv_dim), dilation=(self.dilation, 1), ) hidden_states = hidden_states.transpose(1, 2) hidden_states = self.kernel(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states @add_start_docstrings( """ WavLM Model with an XVector feature extraction head on top for tasks like Speaker Verification. 
""", WAVLM_START_DOCSTRING, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForXVector with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM class WavLMForXVector(WavLMPreTrainedModel): def __init__(self, config): super().__init__(config) self.wavlm = WavLMModel(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0]) tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))] self.tdnn = nn.ModuleList(tdnn_layers) self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim) self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim) self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels) self.init_weights() def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.wavlm.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.wavlm.parameters(): param.requires_grad = False def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]): """ Computes the output length of the TDNN layers """ def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for kernel_size in self.config.tdnn_kernel: input_lengths = _conv_out_length(input_lengths, kernel_size, 1) return input_lengths @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_XVECTOR_CHECKPOINT, output_type=XVectorOutput, config_class=_CONFIG_FOR_DOC, modality="audio", expected_output=_XVECTOR_EXPECTED_OUTPUT, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, XVectorOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.wavlm( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) for tdnn_layer in self.tdnn: hidden_states = tdnn_layer(hidden_states) # Statistic Pooling if attention_mask is None: mean_features = hidden_states.mean(dim=1) std_features = hidden_states.std(dim=1) else: feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1)) tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths) mean_features = [] std_features = [] for i, length in enumerate(tdnn_output_lengths): mean_features.append(hidden_states[i, :length].mean(dim=0)) std_features.append(hidden_states[i, :length].std(dim=0)) mean_features = torch.stack(mean_features) std_features = torch.stack(std_features) statistic_pooling = torch.cat([mean_features, std_features], dim=-1) output_embeddings = self.feature_extractor(statistic_pooling) logits = self.classifier(output_embeddings) loss = None if labels is not None: loss = self.objective(logits, labels) if not return_dict: output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return XVectorOutput( loss=loss, logits=logits, embeddings=output_embeddings, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
# coding=utf-8 # Copyright 2021 The Fairseq Authors, Microsoft Research, and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch WavLM model.""" import math import warnings from typing import Optional, Tuple, Union import numpy as np import torch import torch.nn.functional as F import torch.utils.checkpoint from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...deepspeed import is_deepspeed_zero3_enabled from ...modeling_outputs import ( BaseModelOutput, CausalLMOutput, SequenceClassifierOutput, TokenClassifierOutput, Wav2Vec2BaseModelOutput, XVectorOutput, ) from ...modeling_utils import PreTrainedModel from ...pytorch_utils import torch_int_div from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging from .configuration_wavlm import WavLMConfig logger = logging.get_logger(__name__) _HIDDEN_STATES_START_POSITION = 2 # General docstring _CONFIG_FOR_DOC = "WavLMConfig" # Base docstring _CHECKPOINT_FOR_DOC = "patrickvonplaten/wavlm-libri-clean-100h-base-plus" _EXPECTED_OUTPUT_SHAPE = [1, 292, 768] # CTC docstring _CTC_EXPECTED_OUTPUT = "'mister quilter is the aposle of the middle classes and we are glad to welcome his gospel'" _CTC_EXPECTED_LOSS = 12.51 # Frame class docstring _FRAME_CLASS_CHECKPOINT = "microsoft/wavlm-base-plus-sd" _FRAME_EXPECTED_OUTPUT = [0, 0] # Speaker Verification docstring _XVECTOR_CHECKPOINT = "microsoft/wavlm-base-plus-sv" _XVECTOR_EXPECTED_OUTPUT = 0.97 WAVLM_PRETRAINED_MODEL_ARCHIVE_LIST = [ "microsoft/wavlm-base", "microsoft/wavlm-base-plus", "microsoft/wavlm-large", # See all WavLM models at https://huggingface.co/models?filter=wavlm ] # Copied from transformers.models.wav2vec2.modeling_wav2vec2._compute_mask_indices def _compute_mask_indices( shape: Tuple[int, int], mask_prob: float, mask_length: int, attention_mask: Optional[torch.LongTensor] = None, min_masks: int = 0, ) -> np.ndarray: """ Computes random mask spans for a given shape. Used to implement [SpecAugment: A Simple Data Augmentation Method for ASR](https://arxiv.org/abs/1904.08779). Note that this method is not optimized to run on TPU and should be run on CPU as part of the preprocessing during training. Args: shape: The shape for which to compute masks. This should be of a tuple of size 2 where the first element is the batch size and the second element is the length of the axis to span. mask_prob: The percentage of the whole axis (between 0 and 1) which will be masked. The number of independently generated mask spans of length `mask_length` is computed by `mask_prob*shape[1]/mask_length`. Note that due to overlaps, `mask_prob` is an upper bound and the actual percentage will be smaller. mask_length: size of the mask min_masks: minimum number of masked spans attention_mask: A (right-padded) attention mask which independently shortens the feature axis of each batch dimension. 
""" batch_size, sequence_length = shape if mask_length < 1: raise ValueError("`mask_length` has to be bigger than 0.") if mask_length > sequence_length: raise ValueError( f"`mask_length` has to be smaller than `sequence_length`, but got `mask_length`: {mask_length}" f" and `sequence_length`: {sequence_length}`" ) # epsilon is used for probabilistic rounding epsilon = np.random.rand(1).item() def compute_num_masked_span(input_length): """Given input length, compute how many spans should be masked""" num_masked_span = int(mask_prob * input_length / mask_length + epsilon) num_masked_span = max(num_masked_span, min_masks) # make sure num masked span <= sequence_length if num_masked_span * mask_length > sequence_length: num_masked_span = sequence_length // mask_length # make sure num_masked span is also <= input_length - (mask_length - 1) if input_length - (mask_length - 1) < num_masked_span: num_masked_span = max(input_length - (mask_length - 1), 0) return num_masked_span # compute number of masked spans in batch input_lengths = ( attention_mask.sum(-1).detach().tolist() if attention_mask is not None else [sequence_length for _ in range(batch_size)] ) # SpecAugment mask to fill spec_aug_mask = np.zeros((batch_size, sequence_length), dtype=bool) spec_aug_mask_idxs = [] max_num_masked_span = compute_num_masked_span(sequence_length) if max_num_masked_span == 0: return spec_aug_mask for input_length in input_lengths: # compute num of masked spans for this input num_masked_span = compute_num_masked_span(input_length) # get random indices to mask spec_aug_mask_idx = np.random.choice( np.arange(input_length - (mask_length - 1)), num_masked_span, replace=False ) # pick first sampled index that will serve as a dummy index to pad vector # to ensure same dimension for all batches due to probabilistic rounding # Picking first sample just pads those vectors twice. 
if len(spec_aug_mask_idx) == 0: # this case can only happen if `input_length` is strictly smaller then # `sequence_length` in which case the last token has to be a padding # token which we can use as a dummy mask id dummy_mask_idx = sequence_length - 1 else: dummy_mask_idx = spec_aug_mask_idx[0] spec_aug_mask_idx = np.concatenate( [spec_aug_mask_idx, np.ones(max_num_masked_span - num_masked_span, dtype=np.int32) * dummy_mask_idx] ) spec_aug_mask_idxs.append(spec_aug_mask_idx) spec_aug_mask_idxs = np.array(spec_aug_mask_idxs) # expand masked indices to masked spans spec_aug_mask_idxs = np.broadcast_to( spec_aug_mask_idxs[:, :, None], (batch_size, max_num_masked_span, mask_length) ) spec_aug_mask_idxs = spec_aug_mask_idxs.reshape(batch_size, max_num_masked_span * mask_length) # add offset to the starting indexes so that indexes now create a span offsets = np.arange(mask_length)[None, None, :] offsets = np.broadcast_to(offsets, (batch_size, max_num_masked_span, mask_length)).reshape( batch_size, max_num_masked_span * mask_length ) spec_aug_mask_idxs = spec_aug_mask_idxs + offsets # ensure that we cannot have indices larger than sequence_length if spec_aug_mask_idxs.max() > sequence_length - 1: spec_aug_mask_idxs[spec_aug_mask_idxs > sequence_length - 1] = sequence_length - 1 # scatter indices to mask np.put_along_axis(spec_aug_mask, spec_aug_mask_idxs, 1, -1) return spec_aug_mask # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2NoLayerNormConvLayer with Wav2Vec2->WavLM class WavLMNoLayerNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2LayerNormConvLayer with Wav2Vec2->WavLM class WavLMLayerNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.layer_norm = nn.LayerNorm(self.out_conv_dim, elementwise_affine=True) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = hidden_states.transpose(-2, -1) hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states.transpose(-2, -1) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2GroupNormConvLayer with Wav2Vec2->WavLM class WavLMGroupNormConvLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.conv_dim[layer_id - 1] if layer_id > 0 else 1 self.out_conv_dim = config.conv_dim[layer_id] self.conv = nn.Conv1d( self.in_conv_dim, self.out_conv_dim, kernel_size=config.conv_kernel[layer_id], stride=config.conv_stride[layer_id], bias=config.conv_bias, ) self.activation = ACT2FN[config.feat_extract_activation] 
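# with num_groups == num_channels, the GroupNorm below normalizes each channel independently
# over the time axis (InstanceNorm-like) and is applied between the convolution and the activation in forward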
self.layer_norm = nn.GroupNorm(num_groups=self.out_conv_dim, num_channels=self.out_conv_dim, affine=True) def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = self.layer_norm(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2PositionalConvEmbedding with Wav2Vec2->WavLM class WavLMPositionalConvEmbedding(nn.Module): def __init__(self, config): super().__init__() self.conv = nn.Conv1d( config.hidden_size, config.hidden_size, kernel_size=config.num_conv_pos_embeddings, padding=config.num_conv_pos_embeddings // 2, groups=config.num_conv_pos_embedding_groups, ) if is_deepspeed_zero3_enabled(): import deepspeed with deepspeed.zero.GatheredParameters(self.conv.weight, modifier_rank=0): self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2) deepspeed.zero.register_external_parameter(self, self.conv.weight_v) deepspeed.zero.register_external_parameter(self, self.conv.weight_g) else: self.conv = nn.utils.weight_norm(self.conv, name="weight", dim=2) self.padding = WavLMSamePadLayer(config.num_conv_pos_embeddings) self.activation = ACT2FN[config.feat_extract_activation] def forward(self, hidden_states): hidden_states = hidden_states.transpose(1, 2) hidden_states = self.conv(hidden_states) hidden_states = self.padding(hidden_states) hidden_states = self.activation(hidden_states) hidden_states = hidden_states.transpose(1, 2) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2SamePadLayer with Wav2Vec2->WavLM class WavLMSamePadLayer(nn.Module): def __init__(self, num_conv_pos_embeddings): super().__init__() self.num_pad_remove = 1 if num_conv_pos_embeddings % 2 == 0 else 0 def forward(self, hidden_states): if self.num_pad_remove > 0: hidden_states = hidden_states[:, :, : -self.num_pad_remove] return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureEncoder with Wav2Vec2->WavLM class WavLMFeatureEncoder(nn.Module): """Construct the features from raw audio waveform""" def __init__(self, config): super().__init__() if config.feat_extract_norm == "group": conv_layers = [WavLMGroupNormConvLayer(config, layer_id=0)] + [ WavLMNoLayerNormConvLayer(config, layer_id=i + 1) for i in range(config.num_feat_extract_layers - 1) ] elif config.feat_extract_norm == "layer": conv_layers = [WavLMLayerNormConvLayer(config, layer_id=i) for i in range(config.num_feat_extract_layers)] else: raise ValueError( f"`config.feat_extract_norm` is {config.feat_extract_norm}, but has to be one of ['group', 'layer']" ) self.conv_layers = nn.ModuleList(conv_layers) self.gradient_checkpointing = False self._requires_grad = True def _freeze_parameters(self): for param in self.parameters(): param.requires_grad = False self._requires_grad = False def forward(self, input_values): hidden_states = input_values[:, None] # make sure hidden_states require grad for gradient_checkpointing if self._requires_grad and self.training: hidden_states.requires_grad = True for conv_layer in self.conv_layers: if self._requires_grad and self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs) return custom_forward hidden_states = torch.utils.checkpoint.checkpoint( create_custom_forward(conv_layer), hidden_states, ) else: hidden_states = conv_layer(hidden_states) return hidden_states class WavLMFeatureExtractor(WavLMFeatureEncoder): def __init__(self, config): 
super().__init__(config) warnings.warn( f"The class `{self.__class__.__name__}` has been depreciated " "and will be removed in Transformers v5. " f"Use `{self.__class__.__bases__[0].__name__}` instead.", FutureWarning, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeatureProjection with Wav2Vec2->WavLM class WavLMFeatureProjection(nn.Module): def __init__(self, config): super().__init__() self.layer_norm = nn.LayerNorm(config.conv_dim[-1], eps=config.layer_norm_eps) self.projection = nn.Linear(config.conv_dim[-1], config.hidden_size) self.dropout = nn.Dropout(config.feat_proj_dropout) def forward(self, hidden_states): # non-projected hidden states are needed for quantization norm_hidden_states = self.layer_norm(hidden_states) hidden_states = self.projection(norm_hidden_states) hidden_states = self.dropout(hidden_states) return hidden_states, norm_hidden_states class WavLMAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, num_buckets: int = 320, max_distance: int = 800, has_relative_position_bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads if (self.head_dim * num_heads) != self.embed_dim: raise ValueError( f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}" f" and `num_heads`: {num_heads})." ) self.scaling = self.head_dim**-0.5 self.k_proj = nn.Linear(embed_dim, embed_dim) self.v_proj = nn.Linear(embed_dim, embed_dim) self.q_proj = nn.Linear(embed_dim, embed_dim) self.out_proj = nn.Linear(embed_dim, embed_dim) self.num_buckets = num_buckets self.max_distance = max_distance self.gru_rel_pos_const = nn.Parameter(torch.ones(1, self.num_heads, 1, 1)) self.gru_rel_pos_linear = nn.Linear(self.head_dim, 8) if has_relative_position_bias: self.rel_attn_embed = nn.Embedding(self.num_buckets, self.num_heads) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, position_bias: Optional[torch.Tensor] = None, output_attentions: bool = False, index=0, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Attention layer with relative attention""" bsz, tgt_len, _ = hidden_states.size() # first pass of attention layer creates position bias if position_bias is None: position_bias = self.compute_bias(tgt_len, tgt_len) position_bias = ( position_bias.unsqueeze(0).repeat(bsz, 1, 1, 1).view(bsz * self.num_heads, tgt_len, tgt_len) ) # Compute relative position bias: # 1) get reshape hidden_states gated_hidden_states = hidden_states.view(hidden_states.shape[:-1] + (self.num_heads, -1)) gated_hidden_states = gated_hidden_states.permute(0, 2, 1, 3) # 2) project hidden states relative_position_proj = self.gru_rel_pos_linear(gated_hidden_states) relative_position_proj = relative_position_proj.view(gated_hidden_states.shape[:-1] + (2, 4)).sum(-1) # 3) compute gate for position bias from projected hidden states gate_a, gate_b = torch.sigmoid(relative_position_proj).chunk(2, dim=-1) gate_output = gate_a * (gate_b * self.gru_rel_pos_const - 1.0) + 2.0 # 4) apply gate to position bias to compute gated position_bias gated_position_bias = gate_output.view(bsz * self.num_heads, -1, 1) * position_bias gated_position_bias = gated_position_bias.view((-1, tgt_len, tgt_len)) attn_output, attn_weights = self.torch_multi_head_self_attention( hidden_states, attention_mask, gated_position_bias, 
output_attentions ) return attn_output, attn_weights, position_bias def torch_multi_head_self_attention( self, hidden_states: torch.FloatTensor, attention_mask: Union[torch.LongTensor, torch.BoolTensor], gated_position_bias: torch.FloatTensor, output_attentions: bool, ) -> (torch.FloatTensor, torch.FloatTensor): """simple wrapper around torch's multi_head_attention_forward function""" # self-attention assumes q = k = v query = key = value = hidden_states.transpose(0, 1) key_padding_mask = attention_mask.ne(1) if attention_mask is not None else None # disable bias and add_zero_attn bias_k = bias_v = None add_zero_attn = False # PyTorch 1.3.0 has F.multi_head_attention_forward defined # so no problem with backwards compatibility attn_output, attn_weights = F.multi_head_attention_forward( query, key, value, self.embed_dim, self.num_heads, torch.empty([0]), torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)), bias_k, bias_v, add_zero_attn, self.dropout, self.out_proj.weight, self.out_proj.bias, self.training, key_padding_mask, output_attentions, gated_position_bias, use_separate_proj_weight=True, q_proj_weight=self.q_proj.weight, k_proj_weight=self.k_proj.weight, v_proj_weight=self.v_proj.weight, ) # [Seq_Len, Batch Size, ...] -> [Batch Size, Seq_Len, ...] attn_output = attn_output.transpose(0, 1) if attn_weights is not None: # IMPORTANT: Attention weights are averaged weights # here which should not be the case. This is an open issue # on PyTorch: https://github.com/pytorch/pytorch/issues/32590 attn_weights = attn_weights[:, None].broadcast_to( attn_weights.shape[:1] + (self.num_heads,) + attn_weights.shape[1:] ) return attn_output, attn_weights def compute_bias(self, query_length: int, key_length: int) -> torch.FloatTensor: context_position = torch.arange(query_length, dtype=torch.long)[:, None] memory_position = torch.arange(key_length, dtype=torch.long)[None, :] relative_position = memory_position - context_position relative_position_bucket = self._relative_positions_bucket(relative_position) relative_position_bucket = relative_position_bucket.to(self.rel_attn_embed.weight.device) values = self.rel_attn_embed(relative_position_bucket) values = values.permute([2, 0, 1]) return values def _relative_positions_bucket(self, relative_positions: torch.FloatTensor) -> torch.FloatTensor: num_buckets = self.num_buckets // 2 relative_buckets = (relative_positions > 0).to(torch.long) * num_buckets relative_positions = torch.abs(relative_positions) max_exact = num_buckets // 2 is_small = relative_positions < max_exact relative_positions_if_large = torch.log(relative_positions.float() / max_exact) relative_positions_if_large = relative_positions_if_large / math.log(self.max_distance / max_exact) relative_positions_if_large = relative_positions_if_large * (num_buckets - max_exact) relative_position_if_large = (max_exact + relative_positions_if_large).to(torch.long) relative_position_if_large = torch.min( relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1) ) relative_buckets += torch.where(is_small, relative_positions, relative_position_if_large) return relative_buckets # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2FeedForward with Wav2Vec2->WavLM class WavLMFeedForward(nn.Module): def __init__(self, config): super().__init__() self.intermediate_dropout = nn.Dropout(config.activation_dropout) self.intermediate_dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): 
self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act self.output_dense = nn.Linear(config.intermediate_size, config.hidden_size) self.output_dropout = nn.Dropout(config.hidden_dropout) def forward(self, hidden_states): hidden_states = self.intermediate_dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) hidden_states = self.intermediate_dropout(hidden_states) hidden_states = self.output_dense(hidden_states) hidden_states = self.output_dropout(hidden_states) return hidden_states class WavLMEncoderLayer(nn.Module): def __init__(self, config: WavLMConfig, has_relative_position_bias: bool = True): super().__init__() self.attention = WavLMAttention( embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, num_buckets=config.num_buckets, max_distance=config.max_bucket_distance, has_relative_position_bias=has_relative_position_bias, ) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = WavLMFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states, attention_mask=None, position_bias=None, output_attentions=False, index=0): attn_residual = hidden_states hidden_states, attn_weights, position_bias = self.attention( hidden_states, attention_mask=attention_mask, position_bias=position_bias, output_attentions=output_attentions, index=index, ) hidden_states = self.dropout(hidden_states) hidden_states = attn_residual + hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states = hidden_states + self.feed_forward(hidden_states) hidden_states = self.final_layer_norm(hidden_states) outputs = (hidden_states, position_bias) if output_attentions: outputs += (attn_weights,) return outputs class WavLMEncoderLayerStableLayerNorm(nn.Module): def __init__(self, config: WavLMConfig, has_relative_position_bias: bool = True): super().__init__() self.attention = WavLMAttention( embed_dim=config.hidden_size, num_heads=config.num_attention_heads, dropout=config.attention_dropout, num_buckets=config.num_buckets, max_distance=config.max_bucket_distance, has_relative_position_bias=has_relative_position_bias, ) self.dropout = nn.Dropout(config.hidden_dropout) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.feed_forward = WavLMFeedForward(config) self.final_layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states, attention_mask=None, position_bias=None, output_attentions=False): attn_residual = hidden_states hidden_states = self.layer_norm(hidden_states) hidden_states, attn_weights, position_bias = self.attention( hidden_states, attention_mask=attention_mask, position_bias=position_bias, output_attentions=output_attentions, ) hidden_states = self.dropout(hidden_states) hidden_states = attn_residual + hidden_states hidden_states = hidden_states + self.feed_forward(self.final_layer_norm(hidden_states)) outputs = (hidden_states, position_bias) if output_attentions: outputs += (attn_weights,) return outputs class WavLMEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pos_conv_embed = WavLMPositionalConvEmbedding(config) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList( [WavLMEncoderLayer(config, 
has_relative_position_bias=(i == 0)) for i in range(config.num_hidden_layers)] ) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: # make sure padded tokens output 0 hidden_states[~attention_mask] = 0.0 position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.layer_norm(hidden_states) hidden_states = self.dropout(hidden_states) deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled() position_bias = None for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = np.random.uniform(0, 1) skip_the_layer = self.training and i > 0 and (dropout_probability < self.config.layerdrop) if not skip_the_layer or deepspeed_zero3_is_enabled: # under deepspeed zero3 all gpus must run in sync if self.gradient_checkpointing and self.training: # create gradient checkpointing function def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer), hidden_states, attention_mask, position_bias, ) else: layer_outputs = layer( hidden_states, attention_mask=attention_mask, position_bias=position_bias, output_attentions=output_attentions, index=i, ) hidden_states, position_bias = layer_outputs[:2] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions, ) class WavLMEncoderStableLayerNorm(nn.Module): def __init__(self, config): super().__init__() self.config = config self.pos_conv_embed = WavLMPositionalConvEmbedding(config) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout) self.layers = nn.ModuleList( [ WavLMEncoderLayerStableLayerNorm(config, has_relative_position_bias=(i == 0)) for i in range(config.num_hidden_layers) ] ) self.gradient_checkpointing = False def forward( self, hidden_states, attention_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None if attention_mask is not None: # make sure padded tokens are not attended to hidden_states[~attention_mask] = 0 position_embeddings = self.pos_conv_embed(hidden_states) hidden_states = hidden_states + position_embeddings hidden_states = self.dropout(hidden_states) deepspeed_zero3_is_enabled = is_deepspeed_zero3_enabled() position_bias = None for i, layer in enumerate(self.layers): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = np.random.uniform(0, 1) skip_the_layer = self.training and i > 0 and 
(dropout_probability < self.config.layerdrop) if not skip_the_layer or deepspeed_zero3_is_enabled: # under deepspeed zero3 all gpus must run in sync # XXX: could optimize this like synced_gpus in generate_utils but not sure if it's worth the code complication if self.gradient_checkpointing and self.training: # create gradient checkpointing function def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer), hidden_states, attention_mask, position_bias, ) else: layer_outputs = layer( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, position_bias=position_bias, ) hidden_states, position_bias = layer_outputs[:2] if skip_the_layer: layer_outputs = (None, None) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[2],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_self_attentions ) class WavLMGumbelVectorQuantizer(nn.Module): """ Vector quantization using gumbel softmax. See [CATEGORICAL REPARAMETERIZATION WITH GUMBEL-SOFTMAX](https://arxiv.org/pdf/1611.01144.pdf) for more information. """ def __init__(self, config): super().__init__() self.num_groups = config.num_codevector_groups self.num_vars = config.num_codevectors_per_group if config.codevector_dim % self.num_groups != 0: raise ValueError( f"`config.codevector_dim {config.codevector_dim} must be divisible" f" by `config.num_codevector_groups` {self.num_groups} " "for concatenation." 
) # storage for codebook variables (codewords) self.codevectors = nn.Parameter( torch.FloatTensor(1, self.num_groups * self.num_vars, config.codevector_dim // self.num_groups) ) self.weight_proj = nn.Linear(config.conv_dim[-1], self.num_groups * self.num_vars) # can be decayed for training self.temperature = 2 @staticmethod def _compute_perplexity(probs): marginal_probs = probs.mean(dim=0) perplexity = torch.exp(-torch.sum(marginal_probs * torch.log(marginal_probs + 1e-7), dim=-1)).sum() return perplexity def forward(self, hidden_states): batch_size, sequence_length, hidden_size = hidden_states.shape # project to codevector dim hidden_states = self.weight_proj(hidden_states) hidden_states = hidden_states.view(batch_size * sequence_length * self.num_groups, -1) if self.training: # sample code vector probs via gumbel in differentiateable way codevector_probs = nn.functional.gumbel_softmax(hidden_states.float(), tau=self.temperature, hard=True) codevector_probs = codevector_probs.type_as(hidden_states) # compute perplexity codevector_soft_dist = torch.softmax( hidden_states.view(batch_size * sequence_length, self.num_groups, -1).float(), dim=-1 ) perplexity = self._compute_perplexity(codevector_soft_dist) else: # take argmax in non-differentiable way # comptute hard codevector distribution (one hot) codevector_idx = hidden_states.argmax(dim=-1) codevector_probs = hidden_states.new_zeros(*hidden_states.shape).scatter_( -1, codevector_idx.view(-1, 1), 1.0 ) codevector_probs = codevector_probs.view(batch_size * sequence_length, self.num_groups, -1) perplexity = self._compute_perplexity(codevector_probs) codevector_probs = codevector_probs.view(batch_size * sequence_length, -1) # use probs to retrieve codevectors codevectors_per_group = codevector_probs.unsqueeze(-1) * self.codevectors codevectors = codevectors_per_group.view(batch_size * sequence_length, self.num_groups, self.num_vars, -1) codevectors = codevectors.sum(-2).view(batch_size, sequence_length, -1) return codevectors, perplexity # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Adapter with Wav2Vec2->WavLM class WavLMAdapter(nn.Module): def __init__(self, config): super().__init__() # feature dim might need to be down-projected if config.output_hidden_size != config.hidden_size: self.proj = nn.Linear(config.hidden_size, config.output_hidden_size) self.proj_layer_norm = nn.LayerNorm(config.output_hidden_size) else: self.proj = self.proj_layer_norm = None self.layers = nn.ModuleList(WavLMAdapterLayer(config) for _ in range(config.num_adapter_layers)) self.layerdrop = config.layerdrop def forward(self, hidden_states): # down project hidden_states if necessary if self.proj is not None and self.proj_layer_norm is not None: hidden_states = self.proj(hidden_states) hidden_states = self.proj_layer_norm(hidden_states) hidden_states = hidden_states.transpose(1, 2) for layer in self.layers: layerdrop_prob = np.random.random() if not self.training or (layerdrop_prob > self.layerdrop): hidden_states = layer(hidden_states) hidden_states = hidden_states.transpose(1, 2) return hidden_states # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2AdapterLayer with Wav2Vec2->WavLM class WavLMAdapterLayer(nn.Module): def __init__(self, config): super().__init__() self.conv = nn.Conv1d( config.output_hidden_size, 2 * config.output_hidden_size, config.adapter_kernel_size, stride=config.adapter_stride, padding=1, ) def forward(self, hidden_states): hidden_states = self.conv(hidden_states) hidden_states = 
nn.functional.glu(hidden_states, dim=1) return hidden_states class WavLMPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = WavLMConfig base_model_prefix = "wavlm" main_input_name = "input_values" _keys_to_ignore_on_load_missing = [r"position_ids"] supports_gradient_checkpointing = True def _init_weights(self, module): """Initialize the weights""" # gumbel softmax requires special init if isinstance(module, WavLMGumbelVectorQuantizer): module.weight_proj.weight.data.normal_(mean=0.0, std=1) module.weight_proj.bias.data.zero_() nn.init.uniform_(module.codevectors) elif isinstance(module, WavLMPositionalConvEmbedding): nn.init.normal_( module.conv.weight, mean=0, std=2 * math.sqrt(1 / (module.conv.kernel_size[0] * module.conv.in_channels)), ) nn.init.constant_(module.conv.bias, 0) elif isinstance(module, WavLMFeatureProjection): k = math.sqrt(1 / module.projection.in_features) nn.init.uniform_(module.projection.weight, a=-k, b=k) nn.init.uniform_(module.projection.bias, a=-k, b=k) elif isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, (nn.LayerNorm, nn.GroupNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, nn.Conv1d): nn.init.kaiming_normal_(module.weight) if module.bias is not None: k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0])) nn.init.uniform_(module.bias, a=-k, b=k) def _get_feat_extract_output_lengths( self, input_lengths: Union[torch.LongTensor, int], add_adapter: Optional[bool] = None ): """ Computes the output length of the convolutional layers """ add_adapter = self.config.add_adapter if add_adapter is None else add_adapter def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return torch_int_div(input_length - kernel_size, stride) + 1 for kernel_size, stride in zip(self.config.conv_kernel, self.config.conv_stride): input_lengths = _conv_out_length(input_lengths, kernel_size, stride) if add_adapter: for _ in range(self.config.num_adapter_layers): input_lengths = _conv_out_length(input_lengths, 1, self.config.adapter_stride) return input_lengths def _get_feature_vector_attention_mask( self, feature_vector_length: int, attention_mask: torch.LongTensor, add_adapter=None ): # Effectively attention_mask.sum(-1), but not inplace to be able to run # on inference mode. 
non_padded_lengths = attention_mask.cumsum(dim=-1)[:, -1] output_lengths = self._get_feat_extract_output_lengths(non_padded_lengths, add_adapter=add_adapter) output_lengths = output_lengths.to(torch.long) batch_size = attention_mask.shape[0] attention_mask = torch.zeros( (batch_size, feature_vector_length), dtype=attention_mask.dtype, device=attention_mask.device ) # these two operations makes sure that all values before the output lengths idxs are attended to attention_mask[(torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)] = 1 attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool() return attention_mask def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, (WavLMEncoder, WavLMEncoderStableLayerNorm, WavLMFeatureEncoder)): module.gradient_checkpointing = value WAVLM_START_DOCSTRING = r""" WavLM was proposed in [WavLM: Unified Speech Representation Learning with Labeled and Unlabeled Data](https://arxiv.org/abs/2101.07597) by Chengyi Wang, Yu Wu, Yao Qian, Kenichi Kumatani, Shujie Liu, Furu Wei, Michael Zeng, Xuedong Huang. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving etc.). This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`WavLMConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. """ WAVLM_INPUTS_DOCSTRING = r""" Args: input_values (`torch.FloatTensor` of shape `(batch_size, sequence_length)`): Float values of input raw speech waveform. Values can be obtained by loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the [`AutoProcessor`] should be used for padding and conversion into a tensor of type `torch.FloatTensor`. See [`Wav2Vec2Processor.__call__`] for details. attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Mask to avoid performing convolution and attention on padding token indices. Mask values selected in `[0, 1]`: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. [What are attention masks?](../glossary#attention-mask) <Tip warning={true}> `attention_mask` should only be passed if the corresponding processor has `config.return_attention_mask == True`. For all models whose processor has `config.return_attention_mask == False`, `attention_mask` should **not** be passed to avoid degraded performance when doing batched inference. For such models `input_values` should simply be padded with 0 and passed without `attention_mask`. Be aware that these models also yield slightly different results depending on whether `input_values` is padded or not. </Tip> output_attentions (`bool`, *optional*): Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned tensors for more detail. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all layers. 
See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. """ @add_start_docstrings( "The bare WavLM Model transformer outputting raw hidden-states without any specific head on top.", WAVLM_START_DOCSTRING, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2Model with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM, WavLMBaseModelOutput->Wav2Vec2BaseModelOutput class WavLMModel(WavLMPreTrainedModel): def __init__(self, config: WavLMConfig): super().__init__(config) self.config = config self.feature_extractor = WavLMFeatureEncoder(config) self.feature_projection = WavLMFeatureProjection(config) # model only needs masking vector if mask prob is > 0.0 if config.mask_time_prob > 0.0 or config.mask_feature_prob > 0.0: self.masked_spec_embed = nn.Parameter(torch.FloatTensor(config.hidden_size).uniform_()) if config.do_stable_layer_norm: self.encoder = WavLMEncoderStableLayerNorm(config) else: self.encoder = WavLMEncoder(config) self.adapter = WavLMAdapter(config) if config.add_adapter else None # Initialize weights and apply final processing self.post_init() def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.feature_extractor._freeze_parameters() def _mask_hidden_states( self, hidden_states: torch.FloatTensor, mask_time_indices: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.LongTensor] = None, ): """ Masks extracted features along time axis and/or along feature axis according to [SpecAugment](https://arxiv.org/abs/1904.08779). 
""" # `config.apply_spec_augment` can set masking to False if not getattr(self.config, "apply_spec_augment", True): return hidden_states # generate indices & apply SpecAugment along time axis batch_size, sequence_length, hidden_size = hidden_states.size() if mask_time_indices is not None: # apply SpecAugment along time axis with given mask_time_indices hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) elif self.config.mask_time_prob > 0 and self.training: mask_time_indices = _compute_mask_indices( (batch_size, sequence_length), mask_prob=self.config.mask_time_prob, mask_length=self.config.mask_time_length, attention_mask=attention_mask, min_masks=self.config.mask_time_min_masks, ) mask_time_indices = torch.tensor(mask_time_indices, device=hidden_states.device, dtype=torch.bool) hidden_states[mask_time_indices] = self.masked_spec_embed.to(hidden_states.dtype) if self.config.mask_feature_prob > 0 and self.training: # generate indices & apply SpecAugment along feature axis mask_feature_indices = _compute_mask_indices( (batch_size, hidden_size), mask_prob=self.config.mask_feature_prob, mask_length=self.config.mask_feature_length, min_masks=self.config.mask_feature_min_masks, ) mask_feature_indices = torch.tensor(mask_feature_indices, device=hidden_states.device, dtype=torch.bool) mask_feature_indices = mask_feature_indices[:, None].expand(-1, sequence_length, -1) hidden_states[mask_feature_indices] = 0 return hidden_states @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=Wav2Vec2BaseModelOutput, config_class=_CONFIG_FOR_DOC, modality="audio", expected_output=_EXPECTED_OUTPUT_SHAPE, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, mask_time_indices: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, Wav2Vec2BaseModelOutput]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict extract_features = self.feature_extractor(input_values) extract_features = extract_features.transpose(1, 2) if attention_mask is not None: # compute reduced attention_mask corresponding to feature vectors attention_mask = self._get_feature_vector_attention_mask( extract_features.shape[1], attention_mask, add_adapter=False ) hidden_states, extract_features = self.feature_projection(extract_features) hidden_states = self._mask_hidden_states( hidden_states, mask_time_indices=mask_time_indices, attention_mask=attention_mask ) encoder_outputs = self.encoder( hidden_states, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = encoder_outputs[0] if self.adapter is not None: hidden_states = self.adapter(hidden_states) if not return_dict: return (hidden_states, extract_features) + encoder_outputs[1:] return Wav2Vec2BaseModelOutput( last_hidden_state=hidden_states, extract_features=extract_features, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, ) @add_start_docstrings( """WavLM Model with a `language modeling` head on top for Connectionist 
Temporal Classification (CTC).""", WAVLM_START_DOCSTRING, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForCTC with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM class WavLMForCTC(WavLMPreTrainedModel): def __init__(self, config): super().__init__(config) self.wavlm = WavLMModel(config) self.dropout = nn.Dropout(config.final_dropout) if config.vocab_size is None: raise ValueError( f"You are trying to instantiate {self.__class__} with a configuration that " "does not define the vocabulary size of the language model head. Please " "instantiate the model as follows: `WavLMForCTC.from_pretrained(..., vocab_size=vocab_size)`. " "or define `vocab_size` of your model's configuration." ) output_hidden_size = ( config.output_hidden_size if hasattr(config, "add_adapter") and config.add_adapter else config.hidden_size ) self.lm_head = nn.Linear(output_hidden_size, config.vocab_size) # Initialize weights and apply final processing self.post_init() def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.wavlm.feature_extractor._freeze_parameters() @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=CausalLMOutput, config_class=_CONFIG_FOR_DOC, expected_output=_CTC_EXPECTED_OUTPUT, expected_loss=_CTC_EXPECTED_LOSS, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, CausalLMOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size, target_length)`, *optional*): Labels for connectionist temporal classification. Note that `target_length` has to be smaller or equal to the sequence length of the output logits. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` are ignored (masked), the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.wavlm( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) hidden_states = outputs[0] hidden_states = self.dropout(hidden_states) logits = self.lm_head(hidden_states) loss = None if labels is not None: if labels.max() >= self.config.vocab_size: raise ValueError(f"Label values must be <= vocab_size: {self.config.vocab_size}") # retrieve loss input_lengths from attention_mask attention_mask = ( attention_mask if attention_mask is not None else torch.ones_like(input_values, dtype=torch.long) ) input_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(-1)).to(torch.long) # assuming that padded tokens are filled with -100 # when not being attended to labels_mask = labels >= 0 target_lengths = labels_mask.sum(-1) flattened_targets = labels.masked_select(labels_mask) # ctc_loss doesn't support fp16 log_probs = nn.functional.log_softmax(logits, dim=-1, dtype=torch.float32).transpose(0, 1) with torch.backends.cudnn.flags(enabled=False): loss = nn.functional.ctc_loss( log_probs, flattened_targets, input_lengths, target_lengths, blank=self.config.pad_token_id, reduction=self.config.ctc_loss_reduction, zero_infinity=self.config.ctc_zero_infinity, ) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return CausalLMOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions ) @add_start_docstrings( """ WavLM Model with a sequence classification head on top (a linear layer over the pooled output) for tasks like SUPERB Keyword Spotting. """, WAVLM_START_DOCSTRING, ) class WavLMForSequenceClassification(WavLMPreTrainedModel): def __init__(self, config): super().__init__(config) if hasattr(config, "add_adapter") and config.add_adapter: raise ValueError( "Sequence classification does not support the use of WavLM adapters (config.add_adapter=True)" ) self.wavlm = WavLMModel(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.projector = nn.Linear(config.hidden_size, config.classifier_proj_size) self.classifier = nn.Linear(config.classifier_proj_size, config.num_labels) # Initialize weights and apply final processing self.post_init() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.freeze_feature_extractor def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameters will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.freeze_feature_encoder with wav2vec2->wavlm def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. 
""" self.wavlm.feature_extractor._freeze_parameters() # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.freeze_base_model with wav2vec2->wavlm def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.wavlm.parameters(): param.requires_grad = False @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, modality="audio", ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForSequenceClassification.forward with Wav2Vec2->WavLM, wav2vec2->wavlm def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, SequenceClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.wavlm( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) if attention_mask is None: pooled_output = hidden_states.mean(dim=1) else: padding_mask = self._get_feature_vector_attention_mask(hidden_states.shape[1], attention_mask) hidden_states[~padding_mask] = 0.0 pooled_output = hidden_states.sum(dim=1) / padding_mask.sum(dim=1).view(-1, 1) logits = self.classifier(pooled_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ WavLM Model with a frame classification head on top for tasks like Speaker Diarization. 
""", WAVLM_START_DOCSTRING, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForAudioFrameClassification with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM class WavLMForAudioFrameClassification(WavLMPreTrainedModel): def __init__(self, config): super().__init__(config) if hasattr(config, "add_adapter") and config.add_adapter: raise ValueError( "Audio frame classification does not support the use of WavLM adapters (config.add_adapter=True)" ) self.wavlm = WavLMModel(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.num_labels = config.num_labels self.init_weights() def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.wavlm.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.wavlm.parameters(): param.requires_grad = False @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_FRAME_CLASS_CHECKPOINT, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, modality="audio", expected_output=_FRAME_EXPECTED_OUTPUT, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, labels: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, TokenClassifierOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.wavlm( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] logits = self.classifier(hidden_states) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), torch.argmax(labels.view(-1, self.num_labels), axis=1)) if not return_dict: output = (logits,) + outputs[_HIDDEN_STATES_START_POSITION:] return output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.AMSoftmaxLoss class AMSoftmaxLoss(nn.Module): def __init__(self, input_dim, num_labels, scale=30.0, margin=0.4): super(AMSoftmaxLoss, self).__init__() self.scale = scale self.margin = margin self.num_labels = num_labels self.weight = nn.Parameter(torch.randn(input_dim, num_labels), requires_grad=True) self.loss = nn.CrossEntropyLoss() def forward(self, hidden_states, labels): labels = labels.flatten() weight = nn.functional.normalize(self.weight, dim=0) hidden_states = nn.functional.normalize(hidden_states, dim=1) cos_theta = torch.mm(hidden_states, weight) psi = cos_theta - self.margin onehot = nn.functional.one_hot(labels, self.num_labels) logits = self.scale * torch.where(onehot.bool(), psi, cos_theta) loss = self.loss(logits, labels) return loss # Copied from transformers.models.wav2vec2.modeling_wav2vec2.TDNNLayer class TDNNLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.in_conv_dim = config.tdnn_dim[layer_id - 1] if layer_id > 0 else config.tdnn_dim[layer_id] self.out_conv_dim = config.tdnn_dim[layer_id] self.kernel_size = config.tdnn_kernel[layer_id] self.dilation = config.tdnn_dilation[layer_id] self.kernel = nn.Linear(self.in_conv_dim * self.kernel_size, self.out_conv_dim) self.activation = nn.ReLU() def forward(self, hidden_states): hidden_states = hidden_states.unsqueeze(1) hidden_states = nn.functional.unfold( hidden_states, (self.kernel_size, self.in_conv_dim), stride=(1, self.in_conv_dim), dilation=(self.dilation, 1), ) hidden_states = hidden_states.transpose(1, 2) hidden_states = self.kernel(hidden_states) hidden_states = self.activation(hidden_states) return hidden_states @add_start_docstrings( """ WavLM Model with an XVector feature extraction head on top for tasks like Speaker Verification. 
""", WAVLM_START_DOCSTRING, ) # Copied from transformers.models.wav2vec2.modeling_wav2vec2.Wav2Vec2ForXVector with Wav2Vec2->WavLM, wav2vec2->wavlm, WAV_2_VEC_2->WAVLM class WavLMForXVector(WavLMPreTrainedModel): def __init__(self, config): super().__init__(config) self.wavlm = WavLMModel(config) num_layers = config.num_hidden_layers + 1 # transformer layers + input embeddings if config.use_weighted_layer_sum: self.layer_weights = nn.Parameter(torch.ones(num_layers) / num_layers) self.projector = nn.Linear(config.hidden_size, config.tdnn_dim[0]) tdnn_layers = [TDNNLayer(config, i) for i in range(len(config.tdnn_dim))] self.tdnn = nn.ModuleList(tdnn_layers) self.feature_extractor = nn.Linear(config.tdnn_dim[-1] * 2, config.xvector_output_dim) self.classifier = nn.Linear(config.xvector_output_dim, config.xvector_output_dim) self.objective = AMSoftmaxLoss(config.xvector_output_dim, config.num_labels) self.init_weights() def freeze_feature_extractor(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ warnings.warn( "The method `freeze_feature_extractor` is deprecated and will be removed in Transformers v5." "Please use the equivalent `freeze_feature_encoder` method instead.", FutureWarning, ) self.freeze_feature_encoder() def freeze_feature_encoder(self): """ Calling this function will disable the gradient computation for the feature encoder so that its parameter will not be updated during training. """ self.wavlm.feature_extractor._freeze_parameters() def freeze_base_model(self): """ Calling this function will disable the gradient computation for the base model so that its parameters will not be updated during training. Only the classification head will be updated. """ for param in self.wavlm.parameters(): param.requires_grad = False def _get_tdnn_output_lengths(self, input_lengths: Union[torch.LongTensor, int]): """ Computes the output length of the TDNN layers """ def _conv_out_length(input_length, kernel_size, stride): # 1D convolutional layer output length formula taken # from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html return (input_length - kernel_size) // stride + 1 for kernel_size in self.config.tdnn_kernel: input_lengths = _conv_out_length(input_lengths, kernel_size, 1) return input_lengths @add_start_docstrings_to_model_forward(WAVLM_INPUTS_DOCSTRING) @add_code_sample_docstrings( checkpoint=_XVECTOR_CHECKPOINT, output_type=XVectorOutput, config_class=_CONFIG_FOR_DOC, modality="audio", expected_output=_XVECTOR_EXPECTED_OUTPUT, ) def forward( self, input_values: Optional[torch.Tensor], attention_mask: Optional[torch.Tensor] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, labels: Optional[torch.Tensor] = None, ) -> Union[Tuple, XVectorOutput]: r""" labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If `config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict output_hidden_states = True if self.config.use_weighted_layer_sum else output_hidden_states outputs = self.wavlm( input_values, attention_mask=attention_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if self.config.use_weighted_layer_sum: hidden_states = outputs[_HIDDEN_STATES_START_POSITION] hidden_states = torch.stack(hidden_states, dim=1) norm_weights = nn.functional.softmax(self.layer_weights, dim=-1) hidden_states = (hidden_states * norm_weights.view(-1, 1, 1)).sum(dim=1) else: hidden_states = outputs[0] hidden_states = self.projector(hidden_states) for tdnn_layer in self.tdnn: hidden_states = tdnn_layer(hidden_states) # Statistic Pooling if attention_mask is None: mean_features = hidden_states.mean(dim=1) std_features = hidden_states.std(dim=1) else: feat_extract_output_lengths = self._get_feat_extract_output_lengths(attention_mask.sum(dim=1)) tdnn_output_lengths = self._get_tdnn_output_lengths(feat_extract_output_lengths) mean_features = [] std_features = [] for i, length in enumerate(tdnn_output_lengths): mean_features.append(hidden_states[i, :length].mean(dim=0)) std_features.append(hidden_states[i, :length].std(dim=0)) mean_features = torch.stack(mean_features) std_features = torch.stack(std_features) statistic_pooling = torch.cat([mean_features, std_features], dim=-1) output_embeddings = self.feature_extractor(statistic_pooling) logits = self.classifier(output_embeddings) loss = None if labels is not None: loss = self.objective(logits, labels) if not return_dict: output = (logits, output_embeddings) + outputs[_HIDDEN_STATES_START_POSITION:] return ((loss,) + output) if loss is not None else output return XVectorOutput( loss=loss, logits=logits, embeddings=output_embeddings, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./tests/models/plbart/__init__.py
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/esm/openfold_utils/tensor_utils.py
# Copyright 2021 AlQuraishi Laboratory # Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import partial from typing import Any, Callable, Dict, List, Type, TypeVar, Union, overload import torch import torch.nn as nn import torch.types def add(m1: torch.Tensor, m2: torch.Tensor, inplace: bool) -> torch.Tensor: # The first operation in a checkpoint can't be in-place, but it's # nice to have in-place addition during inference. Thus... if not inplace: m1 = m1 + m2 else: m1 += m2 return m1 def permute_final_dims(tensor: torch.Tensor, inds: List[int]) -> torch.Tensor: zero_index = -1 * len(inds) first_inds = list(range(len(tensor.shape[:zero_index]))) return tensor.permute(first_inds + [zero_index + i for i in inds]) def flatten_final_dims(t: torch.Tensor, no_dims: int) -> torch.Tensor: return t.reshape(t.shape[:-no_dims] + (-1,)) def masked_mean(mask: torch.Tensor, value: torch.Tensor, dim: int, eps: float = 1e-4) -> torch.Tensor: mask = mask.expand(*value.shape) return torch.sum(mask * value, dim=dim) / (eps + torch.sum(mask, dim=dim)) def pts_to_distogram( pts: torch.Tensor, min_bin: torch.types.Number = 2.3125, max_bin: torch.types.Number = 21.6875, no_bins: int = 64 ) -> torch.Tensor: boundaries = torch.linspace(min_bin, max_bin, no_bins - 1, device=pts.device) dists = torch.sqrt(torch.sum((pts.unsqueeze(-2) - pts.unsqueeze(-3)) ** 2, dim=-1)) return torch.bucketize(dists, boundaries) def dict_multimap(fn: Callable[[list], Any], dicts: List[dict]) -> dict: first = dicts[0] new_dict = {} for k, v in first.items(): all_v = [d[k] for d in dicts] if isinstance(v, dict): new_dict[k] = dict_multimap(fn, all_v) else: new_dict[k] = fn(all_v) return new_dict def one_hot(x: torch.Tensor, v_bins: torch.Tensor) -> torch.Tensor: reshaped_bins = v_bins.view(((1,) * len(x.shape)) + (len(v_bins),)) diffs = x[..., None] - reshaped_bins am = torch.argmin(torch.abs(diffs), dim=-1) return nn.functional.one_hot(am, num_classes=len(v_bins)).float() def batched_gather(data: torch.Tensor, inds: torch.Tensor, dim: int = 0, no_batch_dims: int = 0) -> torch.Tensor: ranges: List[Union[slice, torch.Tensor]] = [] for i, s in enumerate(data.shape[:no_batch_dims]): r = torch.arange(s) r = r.view(*(*((1,) * i), -1, *((1,) * (len(inds.shape) - i - 1)))) ranges.append(r) remaining_dims: List[Union[slice, torch.Tensor]] = [slice(None) for _ in range(len(data.shape) - no_batch_dims)] remaining_dims[dim - no_batch_dims if dim >= 0 else dim] = inds ranges.extend(remaining_dims) # Matt note: Editing this to get around the behaviour of using a list as an array index changing # in recent Numpy versions return data[tuple(ranges)] T = TypeVar("T") # With tree_map, a poor man's JAX tree_map def dict_map( fn: Callable[[T], Any], dic: Dict[Any, Union[dict, list, tuple, T]], leaf_type: Type[T] ) -> Dict[Any, Union[dict, list, tuple, Any]]: new_dict: Dict[Any, Union[dict, list, tuple, Any]] = {} for k, v in dic.items(): if isinstance(v, dict): new_dict[k] = dict_map(fn, v, leaf_type) else: 
new_dict[k] = tree_map(fn, v, leaf_type) return new_dict @overload def tree_map(fn: Callable[[T], Any], tree: T, leaf_type: Type[T]) -> Any: ... @overload def tree_map(fn: Callable[[T], Any], tree: dict, leaf_type: Type[T]) -> dict: ... @overload def tree_map(fn: Callable[[T], Any], tree: list, leaf_type: Type[T]) -> list: ... @overload def tree_map(fn: Callable[[T], Any], tree: tuple, leaf_type: Type[T]) -> tuple: ... def tree_map(fn, tree, leaf_type): if isinstance(tree, dict): return dict_map(fn, tree, leaf_type) elif isinstance(tree, list): return [tree_map(fn, x, leaf_type) for x in tree] elif isinstance(tree, tuple): return tuple(tree_map(fn, x, leaf_type) for x in tree) elif isinstance(tree, leaf_type): return fn(tree) else: print(type(tree)) raise ValueError("Not supported") tensor_tree_map = partial(tree_map, leaf_type=torch.Tensor)
# Copyright 2021 AlQuraishi Laboratory # Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from functools import partial from typing import Any, Callable, Dict, List, Type, TypeVar, Union, overload import torch import torch.nn as nn import torch.types def add(m1: torch.Tensor, m2: torch.Tensor, inplace: bool) -> torch.Tensor: # The first operation in a checkpoint can't be in-place, but it's # nice to have in-place addition during inference. Thus... if not inplace: m1 = m1 + m2 else: m1 += m2 return m1 def permute_final_dims(tensor: torch.Tensor, inds: List[int]) -> torch.Tensor: zero_index = -1 * len(inds) first_inds = list(range(len(tensor.shape[:zero_index]))) return tensor.permute(first_inds + [zero_index + i for i in inds]) def flatten_final_dims(t: torch.Tensor, no_dims: int) -> torch.Tensor: return t.reshape(t.shape[:-no_dims] + (-1,)) def masked_mean(mask: torch.Tensor, value: torch.Tensor, dim: int, eps: float = 1e-4) -> torch.Tensor: mask = mask.expand(*value.shape) return torch.sum(mask * value, dim=dim) / (eps + torch.sum(mask, dim=dim)) def pts_to_distogram( pts: torch.Tensor, min_bin: torch.types.Number = 2.3125, max_bin: torch.types.Number = 21.6875, no_bins: int = 64 ) -> torch.Tensor: boundaries = torch.linspace(min_bin, max_bin, no_bins - 1, device=pts.device) dists = torch.sqrt(torch.sum((pts.unsqueeze(-2) - pts.unsqueeze(-3)) ** 2, dim=-1)) return torch.bucketize(dists, boundaries) def dict_multimap(fn: Callable[[list], Any], dicts: List[dict]) -> dict: first = dicts[0] new_dict = {} for k, v in first.items(): all_v = [d[k] for d in dicts] if isinstance(v, dict): new_dict[k] = dict_multimap(fn, all_v) else: new_dict[k] = fn(all_v) return new_dict def one_hot(x: torch.Tensor, v_bins: torch.Tensor) -> torch.Tensor: reshaped_bins = v_bins.view(((1,) * len(x.shape)) + (len(v_bins),)) diffs = x[..., None] - reshaped_bins am = torch.argmin(torch.abs(diffs), dim=-1) return nn.functional.one_hot(am, num_classes=len(v_bins)).float() def batched_gather(data: torch.Tensor, inds: torch.Tensor, dim: int = 0, no_batch_dims: int = 0) -> torch.Tensor: ranges: List[Union[slice, torch.Tensor]] = [] for i, s in enumerate(data.shape[:no_batch_dims]): r = torch.arange(s) r = r.view(*(*((1,) * i), -1, *((1,) * (len(inds.shape) - i - 1)))) ranges.append(r) remaining_dims: List[Union[slice, torch.Tensor]] = [slice(None) for _ in range(len(data.shape) - no_batch_dims)] remaining_dims[dim - no_batch_dims if dim >= 0 else dim] = inds ranges.extend(remaining_dims) # Matt note: Editing this to get around the behaviour of using a list as an array index changing # in recent Numpy versions return data[tuple(ranges)] T = TypeVar("T") # With tree_map, a poor man's JAX tree_map def dict_map( fn: Callable[[T], Any], dic: Dict[Any, Union[dict, list, tuple, T]], leaf_type: Type[T] ) -> Dict[Any, Union[dict, list, tuple, Any]]: new_dict: Dict[Any, Union[dict, list, tuple, Any]] = {} for k, v in dic.items(): if isinstance(v, dict): new_dict[k] = dict_map(fn, v, leaf_type) else: 
new_dict[k] = tree_map(fn, v, leaf_type) return new_dict @overload def tree_map(fn: Callable[[T], Any], tree: T, leaf_type: Type[T]) -> Any: ... @overload def tree_map(fn: Callable[[T], Any], tree: dict, leaf_type: Type[T]) -> dict: ... @overload def tree_map(fn: Callable[[T], Any], tree: list, leaf_type: Type[T]) -> list: ... @overload def tree_map(fn: Callable[[T], Any], tree: tuple, leaf_type: Type[T]) -> tuple: ... def tree_map(fn, tree, leaf_type): if isinstance(tree, dict): return dict_map(fn, tree, leaf_type) elif isinstance(tree, list): return [tree_map(fn, x, leaf_type) for x in tree] elif isinstance(tree, tuple): return tuple(tree_map(fn, x, leaf_type) for x in tree) elif isinstance(tree, leaf_type): return fn(tree) else: print(type(tree)) raise ValueError("Not supported") tensor_tree_map = partial(tree_map, leaf_type=torch.Tensor)
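The `tree_map` helpers in the file above apply a function to every tensor leaf of a nested dict/list/tuple structure. A small usage sketch, assuming the module is importable at the path given in this record (the nested structure itself is made up for illustration):

import torch
from transformers.models.esm.openfold_utils.tensor_utils import tensor_tree_map

batch = {
    "coords": torch.randn(2, 3),
    "masks": [torch.ones(2), torch.zeros(2)],
}

# Cast every tensor leaf to float16 while keeping the nesting intact.
half_batch = tensor_tree_map(lambda t: t.half(), batch)
print(half_batch["coords"].dtype)    # torch.float16
print(half_batch["masks"][0].dtype)  # torch.float16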
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is no longer supported and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/vit_hybrid/convert_vit_hybrid_timm_to_pytorch.py
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert ViT hybrid checkpoints from the timm library.""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) def create_rename_keys(config, base_model=False): rename_keys = [] # fmt: off # stem: rename_keys.append(("cls_token", "vit.embeddings.cls_token")) rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings")) rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight")) rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias")) # backbone rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight")) rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight")) rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias")) for stage_idx in range(len(config.backbone_config.depths)): for layer_idx in range(config.backbone_config.depths[stage_idx]): rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias")) 
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias")) # transformer encoder for i in range(config.num_hidden_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight")) rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias")) rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")) rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias")) rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight")) rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias")) rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight")) rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias")) rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight")) rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias")) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) # fmt: on return rename_keys # we split up the matrix of each encoder layer into queries, keys and values def read_in_q_k_v(state_dict, config, base_model=False): for i in range(config.num_hidden_layers): if base_model: prefix = "" else: prefix = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight") in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[ : config.hidden_size, : ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[ -config.hidden_size :, : ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :] def remove_classification_head_(state_dict): ignore_keys = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(k, None) def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False): """ Copy/paste/tweak model's weights to our ViT structure. """ # define default ViT hybrid configuration backbone_config = BitConfig( global_padding="same", layer_type="bottleneck", depths=(3, 4, 9), out_features=["stage3"], embedding_dynamic_padding=True, ) config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000) base_model = False # load original model from timm timm_model = timm.create_model(vit_name, pretrained=True) timm_model.eval() # load state_dict of original model, remove and rename some keys state_dict = timm_model.state_dict() if base_model: remove_classification_head_(state_dict) rename_keys = create_rename_keys(config, base_model) for src, dest in rename_keys: rename_key(state_dict, src, dest) read_in_q_k_v(state_dict, config, base_model) repo_id = "huggingface/label-files" filename = "imagenet-1k-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} # load HuggingFace model if vit_name[-5:] == "in21k": model = ViTHybridModel(config).eval() else: model = ViTHybridForImageClassification(config).eval() model.load_state_dict(state_dict) # create image processor transform = create_transform(**resolve_data_config({}, model=timm_model)) timm_transforms = transform.transforms pillow_resamplings = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } processor = ViTHybridImageProcessor( do_resize=True, size={"shortest_edge": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), ) image = prepare_img() timm_pixel_values = transform(image).unsqueeze(0) pixel_values = 
processor(image, return_tensors="pt").pixel_values # verify pixel values assert torch.allclose(timm_pixel_values, pixel_values) # verify logits with torch.no_grad(): outputs = model(pixel_values) logits = outputs.logits print("Predicted class:", logits.argmax(-1).item()) if base_model: timm_pooled_output = timm_model.forward_features(pixel_values) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3) else: timm_logits = timm_model(pixel_values) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(timm_logits, outputs.logits, atol=1e-3) print("Looks ok!") if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model {vit_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving processor to {pytorch_dump_folder_path}") processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print(f"Pushing model and processor to the hub {vit_name}") model.push_to_hub(f"ybelkada/{vit_name}") processor.push_to_hub(f"ybelkada/{vit_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--vit_name", default="vit_base_r50_s16_384", type=str, help="Name of the hybrid ViT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub." ) args = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
# coding=utf-8 # Copyright 2022 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert ViT hybrid checkpoints from the timm library.""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from timm.data import resolve_data_config from timm.data.transforms_factory import create_transform from transformers import ( BitConfig, ViTHybridConfig, ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel, ) from transformers.image_utils import PILImageResampling from transformers.utils import logging logging.set_verbosity_info() logger = logging.get_logger(__name__) # here we list all keys to be renamed (original name on the left, our name on the right) def create_rename_keys(config, base_model=False): rename_keys = [] # fmt: off # stem: rename_keys.append(("cls_token", "vit.embeddings.cls_token")) rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings")) rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight")) rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias")) # backbone rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight")) rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight")) rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias")) for stage_idx in range(len(config.backbone_config.depths)): for layer_idx in range(config.backbone_config.depths[stage_idx]): rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias")) 
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight")) rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias")) # transformer encoder for i in range(config.num_hidden_layers): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight")) rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias")) rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")) rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias")) rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight")) rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias")) rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight")) rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias")) rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight")) rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias")) if base_model: # layernorm + pooler rename_keys.extend( [ ("norm.weight", "layernorm.weight"), ("norm.bias", "layernorm.bias"), ("pre_logits.fc.weight", "pooler.dense.weight"), ("pre_logits.fc.bias", "pooler.dense.bias"), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("norm.weight", "vit.layernorm.weight"), ("norm.bias", "vit.layernorm.bias"), ("head.weight", "classifier.weight"), ("head.bias", "classifier.bias"), ] ) # fmt: on return rename_keys # we split up the matrix of each encoder layer into queries, keys and values def read_in_q_k_v(state_dict, config, base_model=False): for i in range(config.num_hidden_layers): if base_model: prefix = "" else: prefix = "vit." 
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias) in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight") in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias") # next, add query, keys and values (in that order) to the state dict state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[ : config.hidden_size, : ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[ -config.hidden_size :, : ] state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :] def remove_classification_head_(state_dict): ignore_keys = ["head.weight", "head.bias"] for k in ignore_keys: state_dict.pop(k, None) def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val # We will verify our results on an image of cute cats def prepare_img(): url = "http://images.cocodataset.org/val2017/000000039769.jpg" im = Image.open(requests.get(url, stream=True).raw) return im @torch.no_grad() def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False): """ Copy/paste/tweak model's weights to our ViT structure. """ # define default ViT hybrid configuration backbone_config = BitConfig( global_padding="same", layer_type="bottleneck", depths=(3, 4, 9), out_features=["stage3"], embedding_dynamic_padding=True, ) config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000) base_model = False # load original model from timm timm_model = timm.create_model(vit_name, pretrained=True) timm_model.eval() # load state_dict of original model, remove and rename some keys state_dict = timm_model.state_dict() if base_model: remove_classification_head_(state_dict) rename_keys = create_rename_keys(config, base_model) for src, dest in rename_keys: rename_key(state_dict, src, dest) read_in_q_k_v(state_dict, config, base_model) repo_id = "huggingface/label-files" filename = "imagenet-1k-id2label.json" id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r")) id2label = {int(k): v for k, v in id2label.items()} config.id2label = id2label config.label2id = {v: k for k, v in id2label.items()} # load HuggingFace model if vit_name[-5:] == "in21k": model = ViTHybridModel(config).eval() else: model = ViTHybridForImageClassification(config).eval() model.load_state_dict(state_dict) # create image processor transform = create_transform(**resolve_data_config({}, model=timm_model)) timm_transforms = transform.transforms pillow_resamplings = { "bilinear": PILImageResampling.BILINEAR, "bicubic": PILImageResampling.BICUBIC, "nearest": PILImageResampling.NEAREST, } processor = ViTHybridImageProcessor( do_resize=True, size={"shortest_edge": timm_transforms[0].size}, resample=pillow_resamplings[timm_transforms[0].interpolation.value], do_center_crop=True, crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]}, do_normalize=True, image_mean=timm_transforms[-1].mean.tolist(), image_std=timm_transforms[-1].std.tolist(), ) image = prepare_img() timm_pixel_values = transform(image).unsqueeze(0) pixel_values = 
processor(image, return_tensors="pt").pixel_values # verify pixel values assert torch.allclose(timm_pixel_values, pixel_values) # verify logits with torch.no_grad(): outputs = model(pixel_values) logits = outputs.logits print("Predicted class:", logits.argmax(-1).item()) if base_model: timm_pooled_output = timm_model.forward_features(pixel_values) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3) else: timm_logits = timm_model(pixel_values) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(timm_logits, outputs.logits, atol=1e-3) print("Looks ok!") if pytorch_dump_folder_path is not None: Path(pytorch_dump_folder_path).mkdir(exist_ok=True) print(f"Saving model {vit_name} to {pytorch_dump_folder_path}") model.save_pretrained(pytorch_dump_folder_path) print(f"Saving processor to {pytorch_dump_folder_path}") processor.save_pretrained(pytorch_dump_folder_path) if push_to_hub: print(f"Pushing model and processor to the hub {vit_name}") model.push_to_hub(f"ybelkada/{vit_name}") processor.push_to_hub(f"ybelkada/{vit_name}") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--vit_name", default="vit_base_r50_s16_384", type=str, help="Name of the hybrid ViT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) parser.add_argument( "--push_to_hub", action="store_true", help="Whether to upload the model to the HuggingFace hub." ) args = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
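For context, a minimal sketch of the migration this description refers to (illustrative only, not taken from this PR's diff; the tensor and variable names are made up):

```python
import torch

# Flattened indices to be mapped back to (row, col) positions in a 4-column grid.
flat_indices = torch.tensor([0, 5, 7, 11])
num_cols = 4

# Deprecated style: `//` on tensors emits the __floordiv__ deprecation warning in recent PyTorch.
# rows = flat_indices // num_cols

# Recommended replacement: explicit torch.div with floor rounding gives the same result.
rows = torch.div(flat_indices, num_cols, rounding_mode="floor")
cols = flat_indices % num_cols
print(rows.tolist(), cols.tolist())  # [0, 1, 1, 2] [0, 1, 3, 3]
```

Note that `rounding_mode="floor"` is what reproduces the old floor-division behavior; omitting it would perform true division and return a floating-point tensor.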
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/utils/dummy_timm_and_vision_objects.py
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None class ConditionalDetrForObjectDetection(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) class ConditionalDetrForSegmentation(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) class ConditionalDetrModel(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) class ConditionalDetrPreTrainedModel(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None class DeformableDetrForObjectDetection(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) class DeformableDetrModel(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) class DeformableDetrPreTrainedModel(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None class DetrForObjectDetection(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) class DetrForSegmentation(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) class DetrModel(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) class DetrPreTrainedModel(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TableTransformerForObjectDetection(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) class TableTransformerModel(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) class TableTransformerPreTrainedModel(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"])
# This file is autogenerated by the command `make fix-copies`, do not edit. from ..utils import DummyObject, requires_backends CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None class ConditionalDetrForObjectDetection(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) class ConditionalDetrForSegmentation(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) class ConditionalDetrModel(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) class ConditionalDetrPreTrainedModel(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) DEFORMABLE_DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None class DeformableDetrForObjectDetection(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) class DeformableDetrModel(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) class DeformableDetrPreTrainedModel(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) DETR_PRETRAINED_MODEL_ARCHIVE_LIST = None class DetrForObjectDetection(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) class DetrForSegmentation(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) class DetrModel(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) class DetrPreTrainedModel(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = None class TableTransformerForObjectDetection(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) class TableTransformerModel(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"]) class TableTransformerPreTrainedModel(metaclass=DummyObject): _backends = ["timm", "vision"] def __init__(self, *args, **kwargs): requires_backends(self, ["timm", "vision"])
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/squeezebert/configuration_squeezebert.py
# coding=utf-8 # Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ SqueezeBERT model configuration""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/config.json" ), "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/config.json", "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/config.json" ), } class SqueezeBertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SqueezeBertModel`]. It is used to instantiate a SqueezeBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SqueezeBERT [squeezebert/squeezebert-uncased](https://huggingface.co/squeezebert/squeezebert-uncased) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the SqueezeBERT model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`SqueezeBertModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). 
type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`BertModel`] or [`TFBertModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): pad_token_id (`int`, *optional*, defaults to 0): The ID of the token in the word embedding to use as padding. embedding_size (`int`, *optional*, defaults to 768): The dimension of the word embedding vectors. q_groups (`int`, *optional*, defaults to 4): The number of groups in Q layer. k_groups (`int`, *optional*, defaults to 4): The number of groups in K layer. v_groups (`int`, *optional*, defaults to 4): The number of groups in V layer. post_attention_groups (`int`, *optional*, defaults to 1): The number of groups in the first feed forward network layer. intermediate_groups (`int`, *optional*, defaults to 4): The number of groups in the second feed forward network layer. output_groups (`int`, *optional*, defaults to 4): The number of groups in the third feed forward network layer. Examples: ```python >>> from transformers import SqueezeBertConfig, SqueezeBertModel >>> # Initializing a SqueezeBERT configuration >>> configuration = SqueezeBertConfig() >>> # Initializing a model (with random weights) from the configuration above >>> model = SqueezeBertModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` Attributes: pretrained_config_archive_map (Dict[str, str]): A dictionary containing all the available pre-trained checkpoints. """ pretrained_config_archive_map = SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP model_type = "squeezebert" def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, embedding_size=768, q_groups=4, k_groups=4, v_groups=4, post_attention_groups=1, intermediate_groups=4, output_groups=4, **kwargs, ): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.embedding_size = embedding_size self.q_groups = q_groups self.k_groups = k_groups self.v_groups = v_groups self.post_attention_groups = post_attention_groups self.intermediate_groups = intermediate_groups self.output_groups = output_groups # # Copied from transformers.models.bert.configuration_bert.BertOnxxConfig with Bert->SqueezeBert class SqueezeBertOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"} else: dynamic_axis = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis), ] )
# coding=utf-8 # Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ SqueezeBERT model configuration""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging logger = logging.get_logger(__name__) SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "squeezebert/squeezebert-uncased": ( "https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/config.json" ), "squeezebert/squeezebert-mnli": "https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/config.json", "squeezebert/squeezebert-mnli-headless": ( "https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/config.json" ), } class SqueezeBertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a [`SqueezeBertModel`]. It is used to instantiate a SqueezeBERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the SqueezeBERT [squeezebert/squeezebert-uncased](https://huggingface.co/squeezebert/squeezebert-uncased) architecture. Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the documentation from [`PretrainedConfig`] for more information. Args: vocab_size (`int`, *optional*, defaults to 30522): Vocabulary size of the SqueezeBERT model. Defines the number of different tokens that can be represented by the `inputs_ids` passed when calling [`SqueezeBertModel`]. hidden_size (`int`, *optional*, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (`int`, *optional*, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (`int`, *optional*, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (`int`, *optional*, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (`str` or `Callable`, *optional*, defaults to `"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, `"relu"`, `"silu"` and `"gelu_new"` are supported. hidden_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (`int`, *optional*, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). 
type_vocab_size (`int`, *optional*, defaults to 2): The vocabulary size of the `token_type_ids` passed when calling [`BertModel`] or [`TFBertModel`]. initializer_range (`float`, *optional*, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (`float`, *optional*, defaults to 1e-12): pad_token_id (`int`, *optional*, defaults to 0): The ID of the token in the word embedding to use as padding. embedding_size (`int`, *optional*, defaults to 768): The dimension of the word embedding vectors. q_groups (`int`, *optional*, defaults to 4): The number of groups in Q layer. k_groups (`int`, *optional*, defaults to 4): The number of groups in K layer. v_groups (`int`, *optional*, defaults to 4): The number of groups in V layer. post_attention_groups (`int`, *optional*, defaults to 1): The number of groups in the first feed forward network layer. intermediate_groups (`int`, *optional*, defaults to 4): The number of groups in the second feed forward network layer. output_groups (`int`, *optional*, defaults to 4): The number of groups in the third feed forward network layer. Examples: ```python >>> from transformers import SqueezeBertConfig, SqueezeBertModel >>> # Initializing a SqueezeBERT configuration >>> configuration = SqueezeBertConfig() >>> # Initializing a model (with random weights) from the configuration above >>> model = SqueezeBertModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config ``` Attributes: pretrained_config_archive_map (Dict[str, str]): A dictionary containing all the available pre-trained checkpoints. """ pretrained_config_archive_map = SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP model_type = "squeezebert" def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, embedding_size=768, q_groups=4, k_groups=4, v_groups=4, post_attention_groups=1, intermediate_groups=4, output_groups=4, **kwargs, ): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.embedding_size = embedding_size self.q_groups = q_groups self.k_groups = k_groups self.v_groups = v_groups self.post_attention_groups = post_attention_groups self.intermediate_groups = intermediate_groups self.output_groups = output_groups # # Copied from transformers.models.bert.configuration_bert.BertOnxxConfig with Bert->SqueezeBert class SqueezeBertOnnxConfig(OnnxConfig): @property def inputs(self) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"} else: dynamic_axis = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis), ] )
-1
huggingface/transformers
20,211
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch"
# What does this PR do? Should address the `__floordiv__` warnings mentioned in #19934. Dividing torch tensors using `//` is deprecated and has to be done via `torch.div`.
ArthurZucker
"2022-11-14T15:30:34Z"
"2023-03-01T09:49:22Z"
b29e2dcaff114762e65eaea739ba1076fc5d1c84
44e3e3fb4930298f092f336c2b7add3ebf051928
prepare for "__floordiv__ is deprecated and its behavior will change in a future version of pytorch". # What does this PR do? Should adress the `__floordiv__` warnings mentionned in #19934. Divinding torch tensor using `//` is no longer supported and has to be done via `torch.div`.
./src/transformers/models/whisper/convert_openai_to_hf.py
# Copyright 2022 The HuggingFace Inc. team and the OpenAI team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration _MODELS = { "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt", "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt", "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt", "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt", "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt", "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt", "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt", "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt", "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt", "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt", } def remove_ignore_keys_(state_dict): ignore_keys = ["layers", "blocks"] for k in ignore_keys: state_dict.pop(k, None) WHISPER_MAPPING = { "blocks": "layers", "mlp.0": "fc1", "mlp.2": "fc2", "mlp_ln": "final_layer_norm", ".attn.query": ".self_attn.q_proj", ".attn.key": ".self_attn.k_proj", ".attn.value": ".self_attn.v_proj", ".attn_ln": ".self_attn_layer_norm", ".attn.out": ".self_attn.out_proj", ".cross_attn.query": ".encoder_attn.q_proj", ".cross_attn.key": ".encoder_attn.k_proj", ".cross_attn.value": ".encoder_attn.v_proj", ".cross_attn_ln": ".encoder_attn_layer_norm", ".cross_attn.out": ".encoder_attn.out_proj", "decoder.ln.": "decoder.layer_norm.", "encoder.ln.": "encoder.layer_norm.", "token_embedding": "embed_tokens", "encoder.positional_embedding": "encoder.embed_positions.weight", "decoder.positional_embedding": "decoder.embed_positions.weight", "ln_post": "layer_norm", } def rename_keys(s_dict): keys = list(s_dict.keys()) for key in keys: new_key = key for k, v in WHISPER_MAPPING.items(): if k in key: new_key = new_key.replace(k, v) print(f"{key} -> {new_key}") s_dict[new_key] = s_dict.pop(key) return s_dict def make_linear_from_emb(emb): vocab_size, emb_size = emb.weight.shape lin_layer = nn.Linear(vocab_size, emb_size, bias=False) 
lin_layer.weight.data = emb.weight.data return lin_layer def _download(url: str, root: str) -> bytes: os.makedirs(root, exist_ok=True) filename = os.path.basename(url) expected_sha256 = url.split("/")[-2] download_target = os.path.join(root, filename) if os.path.exists(download_target) and not os.path.isfile(download_target): raise RuntimeError(f"{download_target} exists and is not a regular file") if os.path.isfile(download_target): model_bytes = open(download_target, "rb").read() if hashlib.sha256(model_bytes).hexdigest() == expected_sha256: return model_bytes else: warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file") with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: with tqdm( total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024 ) as loop: while True: buffer = source.read(8192) if not buffer: break output.write(buffer) loop.update(len(buffer)) model_bytes = open(download_target, "rb").read() if hashlib.sha256(model_bytes).hexdigest() != expected_sha256: raise RuntimeError( "Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model." ) return model_bytes def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path): if ".pt" not in checkpoint_path: original_checkpoint = _download(_MODELS[checkpoint_path]) else: original_checkpoint = torch.load(checkpoint_path, map_location="cpu") dimensions = original_checkpoint["dims"] state_dict = original_checkpoint["model_state_dict"] proj_out_weights = state_dict["decoder.token_embedding.weight"] remove_ignore_keys_(state_dict) rename_keys(state_dict) tie_embeds = True ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0] config = WhisperConfig( vocab_size=dimensions["n_vocab"], encoder_ffn_dim=ffn_dim, decoder_ffn_dim=ffn_dim, num_mel_bins=dimensions["n_mels"], d_model=dimensions["n_audio_state"], max_target_positions=dimensions["n_text_ctx"], encoder_layers=dimensions["n_audio_layer"], encoder_attention_heads=dimensions["n_audio_head"], decoder_layers=dimensions["n_text_layer"], decoder_attention_heads=dimensions["n_text_state"], max_source_positions=dimensions["n_audio_ctx"], ) model = WhisperForConditionalGeneration(config) missing, unexpected = model.model.load_state_dict(state_dict, strict=False) if len(missing) > 0 and not set(missing) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing," f" but all the following weights are missing {missing}" ) if tie_embeds: model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens) else: model.proj_out.weight.data = proj_out_weights model.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") args = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
# Copyright 2022 The HuggingFace Inc. team and the OpenAI team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration _MODELS = { "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt", "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt", "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt", "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt", "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt", "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt", "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt", "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt", "large": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt", "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt", } def remove_ignore_keys_(state_dict): ignore_keys = ["layers", "blocks"] for k in ignore_keys: state_dict.pop(k, None) WHISPER_MAPPING = { "blocks": "layers", "mlp.0": "fc1", "mlp.2": "fc2", "mlp_ln": "final_layer_norm", ".attn.query": ".self_attn.q_proj", ".attn.key": ".self_attn.k_proj", ".attn.value": ".self_attn.v_proj", ".attn_ln": ".self_attn_layer_norm", ".attn.out": ".self_attn.out_proj", ".cross_attn.query": ".encoder_attn.q_proj", ".cross_attn.key": ".encoder_attn.k_proj", ".cross_attn.value": ".encoder_attn.v_proj", ".cross_attn_ln": ".encoder_attn_layer_norm", ".cross_attn.out": ".encoder_attn.out_proj", "decoder.ln.": "decoder.layer_norm.", "encoder.ln.": "encoder.layer_norm.", "token_embedding": "embed_tokens", "encoder.positional_embedding": "encoder.embed_positions.weight", "decoder.positional_embedding": "decoder.embed_positions.weight", "ln_post": "layer_norm", } def rename_keys(s_dict): keys = list(s_dict.keys()) for key in keys: new_key = key for k, v in WHISPER_MAPPING.items(): if k in key: new_key = new_key.replace(k, v) print(f"{key} -> {new_key}") s_dict[new_key] = s_dict.pop(key) return s_dict def make_linear_from_emb(emb): vocab_size, emb_size = emb.weight.shape lin_layer = nn.Linear(vocab_size, emb_size, bias=False) 
lin_layer.weight.data = emb.weight.data return lin_layer def _download(url: str, root: str) -> bytes: os.makedirs(root, exist_ok=True) filename = os.path.basename(url) expected_sha256 = url.split("/")[-2] download_target = os.path.join(root, filename) if os.path.exists(download_target) and not os.path.isfile(download_target): raise RuntimeError(f"{download_target} exists and is not a regular file") if os.path.isfile(download_target): model_bytes = open(download_target, "rb").read() if hashlib.sha256(model_bytes).hexdigest() == expected_sha256: return model_bytes else: warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file") with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: with tqdm( total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024 ) as loop: while True: buffer = source.read(8192) if not buffer: break output.write(buffer) loop.update(len(buffer)) model_bytes = open(download_target, "rb").read() if hashlib.sha256(model_bytes).hexdigest() != expected_sha256: raise RuntimeError( "Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model." ) return model_bytes def convert_openai_whisper_to_tfms(checkpoint_path, pytorch_dump_folder_path): if ".pt" not in checkpoint_path: original_checkpoint = _download(_MODELS[checkpoint_path]) else: original_checkpoint = torch.load(checkpoint_path, map_location="cpu") dimensions = original_checkpoint["dims"] state_dict = original_checkpoint["model_state_dict"] proj_out_weights = state_dict["decoder.token_embedding.weight"] remove_ignore_keys_(state_dict) rename_keys(state_dict) tie_embeds = True ffn_dim = state_dict["decoder.layers.0.fc1.weight"].shape[0] config = WhisperConfig( vocab_size=dimensions["n_vocab"], encoder_ffn_dim=ffn_dim, decoder_ffn_dim=ffn_dim, num_mel_bins=dimensions["n_mels"], d_model=dimensions["n_audio_state"], max_target_positions=dimensions["n_text_ctx"], encoder_layers=dimensions["n_audio_layer"], encoder_attention_heads=dimensions["n_audio_head"], decoder_layers=dimensions["n_text_layer"], decoder_attention_heads=dimensions["n_text_state"], max_source_positions=dimensions["n_audio_ctx"], ) model = WhisperForConditionalGeneration(config) missing, unexpected = model.model.load_state_dict(state_dict, strict=False) if len(missing) > 0 and not set(missing) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing," f" but all the following weights are missing {missing}" ) if tie_embeds: model.proj_out = make_linear_from_emb(model.model.decoder.embed_tokens) else: model.proj_out.weight.data = proj_out_weights model.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # # Required parameters parser.add_argument("--checkpoint_path", type=str, help="Patht to the downloaded checkpoints") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") args = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
-1